author     Tor Norbye <tnorbye@google.com>  2014-07-25 12:24:15 -0700
committer  Tor Norbye <tnorbye@google.com>  2014-07-25 12:24:21 -0700
commit     2e5965e996aad62ab1338b09d54caaf99ff3dd6a (patch)
tree       68aa5081765150003d05155215812e629de09ec8 /python/helpers
parent     65f60eb9011bb2c549a6d83ae31257480368ddc5 (diff)
download   idea-2e5965e996aad62ab1338b09d54caaf99ff3dd6a.tar.gz
Snapshot idea/138.1283 from git://git.jetbrains.org/idea/community.git
047cbb9: IDEA-127620 - Clouds: move frameworks to the top level in new module/project wizard - refactoring of import
1d1914f: IDEA-127620 - Clouds: move frameworks to the top level in new module/project wizard - remove from JavaEE group
3206e15: Merge remote-tracking branch 'origin/master'
a513e70: enable injected context for actions in action popups (IDEA-126995)
70c5d69: CommentInjector: improved detection and IDEA-114915 InjectorUtils performance
482cdc2: @MagicConstant added
997dc0b: do not show VM names in run configurations: junit, application (IDEA-126880)
9873768: merge commands to replace duplicates (IDEA-126776)
e9a17df: field can be local: when field is used in method for read it can't be converted to local if used in another methods (IDEA-127159)
f9e6f06: faster LexerEditorHighlighter.checkContentIsEqualTo
6051f49: generic debugger settings: step 3 — root settings (Debugger node content) — Java transformed
bcb94d7: cleanup
a12ac6c: treat Diagnostic.Kind.NOTE as information message, not a warning message (IDEA-105383)
f367dcb: IDEA-127666 Add tooltip to "Project Setting" icon
08d9677: add delegate method to TextFieldWithHistoryWitBrowseButton
ce0d23f: change messages for postfix templates configurable
d7d7e88: platform: syntax compatibility with antiquated Python versions for environment loader
2a10168: SwingHelper moved to platform-impl
0299b30: auto-import context variable declaration type when completing (IDEA-90157)
e353c2f: prefer some well-known java util classes in completion (IDEA-100669)
f81e96e: java chained completion: qualifier class items shouldn't be inserted as constructors
7cb41f8: Update image to visualize the project-based configurable in the Settings dialog.
e1d651b: Cleanup (formatting)
a7e55c6: time limit for constant search
9cce5c7: do not split annotations on multiple lines for params (IDEA-127017)
0aa3890: simplified, added filtering for subsequent parameter pairs: "key, value", "first, second"
d477f91: IDEA-59662: Provide ability to specify right margin (columns) for each file type separately (enabled for PHP)
9b820f7: IDEA-127679 ArrayIndexOutOfBoundsException if no default or focused button is specified
4c997e0: Prevent NPE in AbstractPopup and add corresponding logging
5a49a15: fix inset: VerticalFlowLayout incorrectly uses vertical gap as top inset
ece44da: cleanup
5a46a42: generic debugger settings: step 3 — root settings (Debugger node content)
04518cd: Inspections settings ui: tools not initialized if no necessity to apply inspections filter
da81c5d: ProjectFileIndex#isIgnored renamed to 'isExcluded'
0e67032: Return accidentally removed PyStringLiteralFixer
3089c9c: PY-12877 Complete statement doesn't insert colon for 'with' statement
1c70a28: IG inline fixes: use inline method processor; if simple getter is inlined in simple expr statement - it should be deleted (IDEA-127135)
8835827: logging for (IDEA-127288)
b585c95: inline method: check unique names for this variable in initial class scope (IDEA-127411)
ccc235e: tests: show warning when only ignored tests were detected (IDEA-127660)
ad42de6: vcs: reformat and cleanup
e9fe369: fixed starting jps-builders tests
9d121c2: fix disposeUIResources if merged
2e780d7: turn off contract inference for overrideable methods: fix logic
1bf7333: don't measure MinusculeMatcher performance
2eae3c8: contract inference: take explicit parameter notnull into account (IDEA-127667)
8851417: non-code gutter: get annotations for bytecode psi when highlighting library source
6608b52: generic debugger settings: step 2 — stepping — transform Groovy, all Stepping configurable on one page
e46729f: cleanup
dd0fd0f: ContainerUtil notNullize/nullize
3d30b27: extract EMPTY_SETTINGS_PANEL_PROVIDER
efe0d5a: Inspections settings ui: filter actions in toolbar added
006705c: DefaultVcsRootPolicy refactoring: 'isIgnored' check moved from 'matchesDefaultMapping' implementations to 'getMappingFor'
363b2cd: simplification
12a3d4f: platform: minor optimization (do not rename/delete directory recursively)
9a80d33: vfs: on create/delete notifications mark a child dirty as well
74d6cb2: diff: use separate action for 'roolback' for file (VCS action) and 'roolback' for local changes (LST action)
8b69a0f: diff: fix access to the invalid ranges
1f4de9d: diff: diff from LineStatusTracker
950a80a: IDEA-110611 diff: Ability to revert lines in selection
b6cd05a: diff: small fixes in LineStatusTracker
db873ee: Use Guava's immutable lists instead of static initializers in PySmartEnterProcessor
b1b8116: PY-9176 Converting % to format() doesn't handle multiline string literals
3d377d9: trigger automake for deleted files
1101b55: Merge remote-tracking branch 'origin/master'
3bf8f90: fix paint track
1e1a9c7: IDEA-61550 respect EditorFilter
831cecb: Merge remote-tracking branch 'origin/master'
28dd985: make editor vertical scrollbar bg the same as editor bg on Mac
c3a963a: fix preferred height for JComboBox too
da026c5: override ControlDoubleClick+Left/Right, make new bindings switchable
222d8ef: bind 'clone caret' actions to ControlDoubleClick+Up/Down
41034f6: DirectoryInfo: extracted sub-class
693b2a2: DirectoryIndexTest: renames
a06bd87: Inspections settings ui: added mask and fixed height of severity icon
73dfd94: parameter name folding reducing: - do not inline if parameter length is less than 3 - do not inline if two subsequent parameters has interval names (beginIndex, endIndex; startId, endId; from, to...)
90d58be: DirectoryIndex: removed unnecessary isProjectExcludeRoot method
359bc94: DirectoryInfo simplification: use booleans instead of bit flags because now we have no so many instance of this class (CR-IC-5807)
df51a0d: Inspections settings ui: severity colors in tree showed only for enabled inspections
931d600: Inspections ui: expand/collapse tree fixed
c0d2162: EA-58395 - NPE: PerlMatcher.matches
d203d8f: bytecode-inferred annotations are only available for compiled elements (IDEA-127608)
5c1256f: Merge remote-tracking branch 'origin/master'
bacc3c2: lambda -> method refs: collapse when acceptable method without supers found
3bed881: Merge remote-tracking branch 'origin/master'
dd4ddfb: method ref -> lambda: names based on resolved method (IDEA-126977)
4b7122b: shorten FQNs when insert casts (IDEA-127573)
91f505e: create static method from usage in interface (IDEA-127563)
124d774: generic debugger settings: step 2 — stepping
72a3ffa: Cleanup (formatting)
7016425: Cleanup (two utility classes merged)
ff16ce7: update about graphics
02939b8: add problems listener to update counter
22a3964: +getList() to access configurables list
79a933c: update problems counter for Darcula
b198c6a: add Problems counter
67cbc12: allow to override layouting algorithm
43ab48e: + getErrorsCount()
72925df: pointless expressions: do not simplify a - b - b to a - 0
824f432: workaround for <p/> inside <pre> in javadoc preview in JEditorPane (IDEA-127430)
ae4f3d9: unwrap: adjust caret before RParenth (IDEA-127580)
a46f4ce: anonym -> lambda: disable in case when inferred type differs from type of anonymous class and parameter of param type is used inside (IDEA-127603); BTW param.toString would be also rejected
89dc528: new inference: no substitution during most specific inference by means of new spec (IDEA-127584)
fb786d3: SSR: improve annotation name value pair matching
20cb223: SSR: renaming
6af6e5d: SSR: Nullable annotations
912772c: SSR: remove unnecessary field
1017a77: SSR: remove a bit of dead code
04d10f8: IDEA-127337 Popups in editor sometimes aren't shown on first invocation
f18c04d: jps plugins: added extension to allow plugins contribute to JPS model even if they don't contribute to external build
5c81146: generic debugger settings: step 1 — data views — merge settings (generic and custom) into one page
4a15d1b: add copyright
27fddc2: generic debugger settings: step 1 — data views
e6d092e: cleanup
343e145: cleanup
90cc9e3: cleanup
30c7264: cleanup
31d8039: cleanup
322e539: IDEA-127077 (highlighting test)
4d7f4ab: move PropertiesComponentImpl to core-impl
2495d31: runtime-opcodes support
f3ffe1a: java-analysis-impl doesn't depend on platform-api
68de28e: use JBColor
fd0bb1e: IDEA-127620 - Clouds: move frameworks to the top level in new module/project wizard
0cf69f8: bytecode analysis: resolve only wrt PsiClassOwner (IDEA-127608)
9ca2a55: reverted e442926 because there is no thread visibility problem right now (per discussion with Roman.Shevchenko)
8e74038: additional tracing code for difference of this session and previous session of particular content calculation
bcd970c: Nullable / NotNull
62a0104: revert: Ctrl-N doesn't show after delete; no input field for Ctrl-N etc
9b4912c: lambda: do not start inference from void type (IDEA-127596)
6de6ee1: delegate methods: do not suggest to delegate when delegate would override final (IDEA-127605)
9ff4e24: plugins update dialog layout (IDEA-127462)
f03ca65: create class: filter out generated roots (IDEA-127562)
455fc35: move destination: fix laziness
d93d0ab: Workaround for another JDK bug in cached popups.
a4cdd68: update grails project structure: remove proper instance of ContentEntry (IDEA-120622) using urls since ContentEntry uses default equals/hashcode implementation
14d57e4: Cleanup (formatting)
e35d9d8: IDEA-127077 (incorrect parsing of type annotations)
653ccf4: move to the right module
0e44e1c: Merge remote-tracking branch 'origin/master'
afc422e: fix errors foreground for Darcula
f38dd67: Merge remote-tracking branch 'origin/master'
bbb42a45d: IDEA-126982 (Add an inspection to flag a nonfinal class that only has private constructors)
c54ff24: fix typos
8ae3019: [git tests] assume git version is supported
1e79d7b: [log] Simplify & fix refs comparison API & implementation
dd0a29c: [git] fix refs comparator for the case when refs have equal names
db96ca3: [git] remove trivial test setup
7f6d1ce: [git] rename test class to match the production class name
d94a9bf: simplify:inline variale
a2d93f2: IDEA-126389 FromString ClosureParams Hint should use method.getTypeParametersList() as a context for creating type because method does not process its type parameters in processDeclarations(). Use method as a context if it does not contain type parameter list
a2221d5: IDEA-126334 Correct declaration scope for light parameters and getReturnTypeElementGroovy for light methods
5ccb872: IDEA-126389 FromString ClosureParams Hint should use method.getTypeParametersList() as a context for creating type because method does not process its type parameters in processDeclarations()
b43f06c: make another IG test light
09fafa8: IDEA-127263 ("Conditional Expression With Identical Branches" deleting non-identical branches.)
feeb54a: SSR: remove unused method
d5ce2c1: SSR: match name value pairs when value is not specified
d61fdb0: fix typo
55eb67a: fixing JspAutoImportTest: auto-import classes at statement start
455b352: cleanup. remove unused fields
42c3f0d: change links protocol
05a4407: add hyperlink listener
9906008: Merge remote-tracking branch 'origin/master'
25f1003: stub for ordered scopes problem
7e4a359: Merge branch 'master' of git.labs.intellij.net:idea/community
3559eb7: bundling ecj-4.4 java compiler
0d34ff1: PY-4186 Lettuce BDD tool support (initial commit of lettuce runner)
640c9a6: Merge remote-tracking branch 'origin/master'
e442926: IDEA-117507 NullPointerException on startup of IDEA 13
d2be1f3: disable hard consistency check
5cc3a16: fix mistype in html+ increase space between elements
2dc1b7c: Log popup window size before showing
1a20228: showing build attempt in logs and in title bar only
2df962a: scopes: include module in file pattern as matching would be calculated against it anyway (IDEA-127397)
d60644a: Resize a popup window according to the preferred size of the component instead of its size.
b237157: do not lock UI during plugin download (IDEA-127454)
067ef9e: method refs: check first parameter if it could be a receiver for varargs methods (IDEA-127506)
eff4567: compose error message (IDEA-127534)
3bc9fa3: IDEA-111466 Mac OS X keyboard shortcuts with alt don't work under Oracle Java 7
6bf1409: postfix template fix ternary operator + test
c6ff466: IDEA-111466 Mac OS X keyboard shortcuts with alt don't work under Oracle Java 7
55a8bd4: DebuggerSettings — use PersistentStateComponent instead of deprecated JDOMExternalizable
189f3f5: cleanup
402ccab: DBE-228: column margins
333636c: Replace comma with ampersand
68ec56f: Add groupId for top-level configurable for Junior IDEs.
2ff73e2: don't recreate code fragment psi (IDEA-127039)
5379b77: turn off contract inference for overrideable methods; hopefully, not forever (IDEA-127518)
49a119a: IDEA-121276 Constant conditions: work with arrays
ec7c9a3: JavaConstantExpressionEvaluator: add @Nullable
563ad9e: move DfaValue calculation for expressions to one place
392885a: @NotNull DfaVariableValue.psiVariable
ee1eb2c: store / use buildAttemptInfo taken from file
5de0503: insert delete with renaming logic for Windows into general delete method
0a6a374: Reverted: cast from Short to char is prohibited (1c5a03589e5155633de91051e59255d3ad84414f)
91b8291: IDEA-123863 'Save Project as Template' action leads to deadlock
3aed563: Platform: use presentable names when generate keymaps for help
34f2fb0: grayed call frame view if in library content (the same as in java)
f9982ec: update grails project structure: remove proper instance of ContentEntry (IDEA-120622)
ecbbac9: Merge remote-tracking branch 'origin/master'
9795ec8: extract from internal mode
3b00516: notnull
67b840e: notnull
e4308c8: cleanup
e75bfff: correctly calculate inspection short name in presence of upsource
52d5382: optimisation
1c5a035: cast from Short to char is prohibited
685fdb4: optimisation: getMaybeSuperClassSubstitutor() allows to avoid extra isInheritor() check
60d088f: notnull, cleanup
b5fb827: optimisation
edf6a02: Merge remote-tracking branch 'origin/master'
23586b9: Added coverage.py fixing wrongly optimized imports.
f291cc5: Fixed getting helper.
ecec042: Merge remote-tracking branch 'origin/master'
f997ddb: StringPattern: restore binary compatibility (broken by 64c812e)
145fbae: Merge remote-tracking branch 'origin/master'
642faba: as ignored file set can be quite large, use map without entries for smaller memory footprint (e.g. in snapshot for IDEA-126201 number of ignored files is 126K, size of entries's memory for this number of elements is 2.5M)
067aa19: skip non-java files with given word in literal even before retrieving text / searching the word / finding element at tree (IDEA-126201)
aea7066: allow to pull methods in super interface as default ones when super interface already contains a method with the same signature; remove @Override in such case (IDEA-126988)
7d19e0e: method refs: missed super class substitution (IDEA-126969)
f17f398: 'unmark root' action: allow user to cancel exclusion if excluded folder is selected (IDEA-23537)
d61da37: IDEA-122076, IDEA-122172 Multicaret actions in quick find mode
8b2824d: IDEA-121956 Multiple Carets: Add support for skipping next selection
1ce6009: - catch runtime problems (e.g. IndexOutOfBoundsException from ByteBuffer) when instantiating stub index and retry index creation (IDEA-117596, IDEA-125750), ep2
c455dd4: - catch runtime problems (e.g. IndexOutOfBoundsException from ByteBuffer) when instantiating stub index and retry index creation (IDEA-117596, IDEA-125750) - use deleteWithRenaming to update index version (IDEA-127160)
b0fe937: DirectoryIndex: fixed 'isIgnored' for ignored files
fb4de8d: DirectoryIndex: store info for excluded and ignored directories
df68af8: require that notnull fields be initialized (IDEA-114889)
5a22fdc: IDEA-126660 Incorrect 'condition is always true' for a final field if an overridden method is called from the constructor
a7ec8b7: IDEA-126173 Specify exception type thrown by @NotNull annotations
f4d3703: IDEA-126531 Completion inserts unwanted semicolon
2887069: dfa: take into account known variable values when handling number comparisons (IDEA-126446)
1aa6e7a: add cast to disambiguate method calls when replacing their argument with null (IDEA-126466)
65ccc19: HardcodedContracts: use a utility method
4853112: dfa: we don't replace nontrivial references with constants, so remove the corresponding code and tests
3eea5bc: hardcode Guava Preconditions.checkNotNull contract
6d9cf39: dfa: understand assertThat notnull (IDEA-125977, IDEA-65004)
2a3e157: Merge remote-tracking branch 'origin/master'
f5e85e7: IDEA-126122 (reFS volumes recognized)
be1c5b8: Refactoring: configurable from provider can be null
e568624: Cleanup (file watcher project converted to VS13; generated junk dropped; version info updated)
1e7a9e9: Cleanup (tabs/spaces; formatting)
0262088: cosmetics: add extra offset for project icon and label margins
1695e0c: fix AIOOB in history
2b3dd78: add css styles to Error Pane
ac796ba: wrap errors to html for new project structure dialog
59763f3: Focus logging
3d07af1: SingleInspectionProfilePanel NPE fixed
397e893: IDEA-127478 Tip Of The Day dialog could be shown on incorrect monitor
090b2e5: on case-insensitive file systems perform file name compatibility check using real file names, not the ones taken from dynamically built or stored paths
e77cd8a: imports optimized
444f98b: show tooltip when mouse on "multiscopes" icon in inspections settings ui
b0b0fef: external build: send empty text with statistics message to avoid producing unnecessary output by IDEA Project Runner in TeamCity
970fe60: use ProjectFileIndex (from API) instead of DirectoryIndex (from impl) where possible
d9c45be: libraries scope: use ProjectFileIndex (from API) instead of DirectoryIndex (from impl) where possible
417adc7: notnull
b58808c: js postfix templates -> fix examples and configurable UI
ba3b637: Settings dialog: the project-based configurables are marked with an icon.
6508417: IDEA-127438 Tool windows drag&drop failure under Java 7
ca76151: IDEA-127343 Github: pass ModalityState to PasswordSafe
da5b7e6: use Couple
e98cb25: use Couple
568b881: DBE-228: abbreviate long cell values
abd5dce: notnull
6d84ba6: Add image to visualize the project-based configurable in the Settings dialog.
c32d9db: [vcs] IDEA-107598 Affected paths: consider the whole path when highlighting
f4ee468: Inspections settings ui: new inspections tree (tree replaced by table tree, added icons represent severities)
47de3cf6: LiteralExpressionTokenizer cleanup
120fe08: get rid of group border
1a5cf59: IDEA-127343 Pass ModalityState to PasswordSafe.store/removePassword
0143935: remove trivial javadocs
f3eb2b2: [git] Remove obsolete password aware handler
c4a16ff: Get password called with modality state; unnecessary get password from memory removed for hg auth
bf1f916: restore API for idea-php-symfony2-plugin
c53dc62: check for null
62e5744: unused classes from 'old make' implementation deleted
b8ddfa4: groovy compiler: deleted classes related to old make implementation
42926e1: Fix possible NPE in JiraRepository (EA-58392)
3498760: Introduce live integration tests for Tasks plugin. Add initial version of such tests for Trello
6fd747b: Migrate TrelloRepository to new tasks API
3021914: Remove wrong GSON annotation of 'closed' field in TrelloCard
e2b581a: IDEA-115177 (watcher size check no longer needed)
c9d3e32: vfs: file watcher executable lookup fixed
431f9db: get rid of deprecated code
25c8ccd: EA-58277 (diagnostic)
6037400: don't highlight '*.restartRequired' properties in registry.properties as unused
dfb7e51: registry: use 'restartRequired' property to enforce automatic restart
3305c35: registry: description text corrected
0ffc7d0: CIDR: OC-10422 Current resolve configuration should be preselected in resolve context panel
5f31e2e: show excluded files in Project View, initial (IDEA-23537)
ace6fe4: artifacts: one more unused class from 'old make' implementation deleted
5897a01: deleted old unused deprecated 'build participant' classes
b75d5f2: artifacts: unused classes from 'old make' implementation deleted
3ed24eb: RUBY-15507: we should not wrap configurables in scroll pane
26b2743: ProjectViewPsiTreeChangeListener updates tree more deeply on property changed
77ee53a: Fix again the following changes that were reverted: Fix for nullable composite configurable.
d0cd301: IDEA-126984 Mac OS X: Double line above the editor
c2e6ab0: revert (e3e3224e853ad466cebf13c8b5c7f01d272d8e88 Fix for nullable composite configurable.)
d1ce059: highlight modules
8b6c40d: platform: splash misalignment on Windows fixed
8b35719: devkit: incorrect modification reverted
928feba: JavaHelp 2.0_05
bc13d12: Merge remote-tracking branch 'origin/master'
a9d7da6: added test for default property file in resource bundle editor
160fca0: project: windows update scripts unified
e3e3224: Fix for nullable composite configurable.
a69f781: cleanup
aa16ba2: IDEA-126984 Mac OS X: Double line above the editor
0994232: project: build/update scripts tuned to support home paths with spaces
b9eb20d: EA-57720 - PIEAE: LeafPsiElement.a
1396322: IDEA-127343 Pass ModalityState to PasswordSafe.getPassword
7eb4791: [git] remove obsolete javadoc
4d3abb7: cleanup: remove trivial comments
d8f6d4c: SeverityEditorDialog -- if "mixed" pseudo severity chosen then no default selection
e39d6a2: ScopesAndSeveritiesTable suppress unused declaration inspection
f356442: DBE-321: prevent data tooltip flicker
226e175: Inspection settings UI: multiselection in inspections tree support
68c651e: catch exception when writing content, marking vfs for rebuild (IDEA-125925)
f59b7b3: revert the change that breaks functionality
eac71fe: add tool window id to assertion
349f84a: fix several concurrency issues in Search Everywhere
65824bb: Some configurables requires the dynamic attribute to configure its wrapper properly.
73c7f62: go to action: move Analyze Stacktrace... to the top
5458ff1: IDEA-127232 Fatal Error on startup on Mac Os X
09230b2: IDEA-125671 "Install plugin from disk" action could have different description in Find Action list, because now there are two identical items in the list
35e6c4d: remove suppressions
e83f80f: implicit usage provider for *.description in registry.properties
ddae39a: file path completion fixed
6f1f6cf: Inspection settings UI: creation copy of tool wrapper if new scope inserted
981a3db: remove unused class ColumnInfo$StringColumn
864cd2d: Inspection settings UI: - "ALL" scope moved to last position in table - move scopes in single inspection disabled
4965683: Merge remote-tracking branch 'origin/master'
e488923: cleanup. remove unused methods
d13074e: fix incorrect target element calculation for text editors
e2c4550: typo
219b31c: show display name for certificate configurable
1b4a3c1: larger scope when searching in path / ssr (IDEA-127068)
17a5843: disable plugins if cycle was detected (IDEA-127177)
001a9cf: New inspections settings UI: - nodes for individual scopes deleted from settings tree - added table in right panel to configure scopes&severities - new scope button added to right panel (if only default scope currently available)
de377f8: extract variable missed events (IDEA-127166)
dcb45f2: extract field missed events (IDEA-127167;IDEA-127169)
1d8d358: extract parameter missed after event added (IDEA-127170)
30d945d: local rename events (IDEA-127172; IDEA-127172)
d0b86b6: popup position for injected fragments during inplace refactorings should be calculated over injected editor as caret range marker is against injected document (IDEA-126989)
53de53f: Refactoring of grouping configurables. Use the following registry key to switch on this functionality: ide.file.settings.order.new
088e9a2: Fix tests broken after changes for PY-4073
d5032e5: java: parser changes temporarily reverted
690797e: Using "Batch File Change" events in BuildManager to cancel already running automake session while VCS is updating working tree (IDEA-111673)
4e586b4: replace with foreach
b70bfe3: Platform: do not produce duplicating shortcuts in help keymaps
d65ce21: Merge remote-tracking branch 'origin/master'
6a141b3: [git] rename utility method to a more consistent name
c5129cf: fix layouts: exclude structural search from defaul platform implementations
a9396b6: unused property inspection: search in a narrow scope first
5002c70: contract inference: honor already known parameter values during data flow analysis
704198c: contract inference: exclude contracts not matching the return type
349f900: no ContractInference for compiled elements
f02a497: change foreground and separator color
2e6fb66: move Problems down and change separator
6404449: support "--" separator
aa98745: customize separator
2e376b2: [git] IDEA-126472 IDEA-126473 Notify about external changes modifying the working tree
1a37016: [git] better diagnostics output format
f9a615b: [git] remove unused method
8b2eae6: [git] Remove duplicated code
5dd8682: Merge remote-tracking branch 'origin/master'
91b1e1b: Fixed wrong cast.
44ec294: annotate getPattern / getOptimizedIndexingPattern with @Nullable return info
50d32b8: SSR fix replace test
6521af0: IDEA-127077 (incorrect parsing of type annotations)
5b72fac: Cleanup (warning)
4fd07a5: License dialog: License key: provide precise diagnostic for rejected key After-review
6cb2eab: Remove deprecated Remote Interpreters API.
7d1e1bd: do not propagate exceptions in case of broken bytecode
980b837: fix test
e099a14: gant-based builds: added one more jar required for Kotlin compiler
f36393b: go to action: use gray instead of italic for shortcuts rendering
1c43998: gant-based builds: removed incorrect classpath entry for Kotlin compiler
ffb8b5f: gant-based builds: added jars required for Kotlin compiler to classpath
47eec66: java inheritor cache: use weak map to avoid holding PSI for unnecessary long time
1a78fe7: goto action: restore enabled action preference
8343e8e: goto action: rewrite using a more suitable API, prefer exact matches, fix search everywhere
a24b862: IDEA-127189 Select next/all ocurrence fails with braces
304a69b: IDEA-103025, EA-53787 fix handling of special fonts in soft wrap model
dd4b761: trait field searching test fixed
7c04faa: Merge remote-tracking branch 'origin/master'
bad2fa8: Fixing env tests
dd23104: memory hogging on javaInjections.xml editing
1bbb89c: go to action: proper text attributes for shortcuts
0057fe3: go to action: don't show duplicate group for intention actions
482084c: go to action: missing bg for option descriptors
590ed4c: disable create instance field inside interface (IDEA-127211)
65d11dd: disable generate test method actions outside class (IDEA-127246)
f69f9fd: leave static modifiers in interface methods (IDEA-127255)
42f50f6: sort items in goto action by matching degree (IDEA-112613)
45915b2: show colored matched fragments in goto action (IDEA-112613)
34b51a1: show progress when status bar is switched off (registry: ide.show.progress.without.status.bar)
cdbb650: null check
d5dada4: disposed check
64c812e: StringPattern: cleanup, add StringPatternTest
7fa1b28: remove unnecessary field and getter from SSR ReplaceCommand
472bf3f: java: decompiler fixes (IDEA-127229 et al.)
7ada79c: AnnotationUtil.isInferredAnnotation
c0d76a2: parameter info: don't retrieve inferred annotations in dumb mode
4df5903: IDEA-127127 charset name completion: provide completion for more APIs
b6ea24b: don't duplicate annotations in parameter info
3f7935a: dfa: turn on contract inference from source
b6b76b7: IDEA-127212 Live templates "Use static import if possible" fails sometimes
f372b95: corrected 'isCommunity' check
08b3c7f: disable table speed search for empty tables
51292ab: Merge branch 'svn_18_3'
511f071: IDEA-126911 Do not call "VirtualFile.getFileType()" for directories (while checking if annotate action is enabled)
93e45b9: PY-4073 Add completion for special function attributes
0a5bfb0: Merge remote-tracking branch 'origin/master'
0fcfb36: Merge remote-tracking branch 'origin/master'
86b7bf5: Fix copying .egg files from remote host to libraries folder (PY-13044).
912c0d7: html editor as a error pane
55b4064: do not modify profile file to write default
102bcb2: structural search bundles fixed
bb0b50c: Merge remote-tracking branch 'origin/master'
5253ba2: type migration: make part of platform
3ab8864: Error pane initial
581cf80: structural search: groovy should work in community
76726e8: disposed check
be28458: css synchronizer — support of manual update
7b29836: structural search: make part of platform: enable in community
a5061f3: fix module locations
a3626d6: Merge remote-tracking branch 'origin/master'
695682b: structural search: make part of platform
50e27ec: enable inline redundant local vars in batch mode (IDEA-126957)
49b9f30: EA-58014 (diagnostic)
9d32e98: added incompatible plugin version org.intellij.clojure / 0.2.1.178
c27d829: - properly retrieve set of keys for unsaved documents for their first change - (in extra sanity checks mode) added PHM that stores previous content indexing info to facilitate discovery / fixing assertions about different indexing behavior
0896a25: avoid accessing invalid buffer
159f3c5: Merge remote-tracking branch 'origin/master'
4cc0d5e: IDEA-127183 Windows: Look for 1.8 JVM registry entry by default
f41f9c3: Debugger reader performance fixed, now uses BaseOutpuReader (PY-11393).
cb2159e: Switch on new Alt processing under Windows L&F.
ef8fbc4: notnull
e1445e6: Gradle: IDEA-127217 Tests with spaces aren't wrapped with " " when running specific test as Gradle test
cf1f8c8: fix tests: improved 'isInCommunity' check
bcb38e5: pep8.py updated to version 1.5.7
0970549: Coverage.py updated to version 3.7.1
90f034d: Merge remote-tracking branch 'origin/master'
6ec557a: Try even more to connect.
69a2a4c: path completion: rendering fixed for several matched context helpers
21efa65: cleanup
7494ef6: support for multiple groups in idea.test.group property
00847ed: No need to register MessageBusConnection disposal: it is done automatically
0fb6eab: Suppress "unused" constructor: initialized as a Service
2b6b549: Fix already disposed EA-58257
fc46e99: look for JVM 1.8 first
b7cea46: added caching + minor size opts
63453c3: fix NPE in SSR ReplaceOptions
941c20b: stop usage search on target invalidation (EA-54329)
d75a12f: invalid type diagnostics (EA-58069)
121084a: remove specific Lookup check in intentions pass, daemon is disabled during lookup anyway (EA-58246)
71fd543: WEB-9954 move and rename confusing "Live Edit menu item
5d745b1: check for null for content hashes flush
7073116: fix ConcurrentMapsTest (by Roman)
6d6d15a: IDEA-122254 ("Use static import if possible" checkbox for the Structural Replace dialog)
4febd80: intention should insert static import when replacing assertNull() with assertEquals()
4dd5b62: insert static import when replacing assertEquals() with intention
591d723: insert static import when flipping assert literal outside of test method
90f2ef5: use static import if it is already present for several intentions and inspections
dd2d141: Merge remote-tracking branch 'origin/master'
ae714aa: Win tests should be more stable now
3b3fee8: clear zip file caches on low memory
c81ebf5: Refactoring: create instances of IdeConfigurablesGroup and ProjectConfigurablesGroup in one place.
b83f901: Merge remote-tracking branch 'origin/master'
9c13559: Comment.
ec56a95: skip leading .* in reg exp used for todo pattern when building index: it makes sense for todo highlighting but increases matching due to backtracking enormously (IDEA-74104)
d83e334: IDEA-126836 Formatting adds extra blank line after class header if it ends with comment
6d68662: use 1px separators
e360f62: add utility method to configure 1px separator
23ba8f8: use encoding info when building hash for contents + rebuild content dependent indices upon encoding change
7b8cc6c: Fixed according to CR-IC-5706
93d37fa: don't auto-import classes where class references are prohibited syntactically (IDEA-127190)
f87e56d: bytecode analysis: a comment about lite control flow analysis
90addcd: bytecode analysis: lazy resultOrigins analysis
a7c1c1a: bytecode analysis: lite control flow analysis
8211223: bytecode analysis: no analysis at all if nothing is inferrable
60a6acf: moved method from moduleutils to modulerootmodificationutils
3173594: jetbrains.dic + "unmerged"
c95ca18: support prefix expressions in quick evaluate
8d71285: cleanup
e418bb2: use UIUtil.getSidePanelColor()
bef8a7f: remove border for content component
1835567: + side panel color
105b9a1: @Nullable
ef2875d: IDEA-126927 - CloudFoundry: if the same artifact is deployed to few clouds, one can view the state of the single one only
7c415ca: IDEA-127069 help button added to the Mercurial Update Project dialog
0e12675: Revert "StringUtil: Added wordWrapString() method"
5af9e9f: separator titles for Darcula
b044185: better IDE responsiveness during bytecode indexing
15ed602: svn: Fixed converting SVNStatusType to StatusType - explicitly check values that have non-unique status type names
43b6e08: BytecodeAnalysisIntegrationTest: a more profound test data refresh
525a896: Merge branch 'master' of git+ssh://git.labs.intellij.net/idea/community
fe15e86: bytecode analysis: index version increment
0d24afe: disabled batch evaluator by default, overhead is too high. Enable if needed using registry key debugger.batch.evaluation
0069112: added nontull
062254d: svn: Refactored SvnStatusHandler - moved status type parsing to StatusType class
a884c1b: ignore inferred annotations when checking overriding parameters nullability
b6b6ea1: bytecode analysis: lite persistent hashCode for Configuration
1f05788: bytecode analysis: skipping frames and debug instructions
aa3b189: svn: Refactored StatusType - removed deprecated STATUS_MERGED value
f2d0988: do not capitalize titles
5ba1390: extract CachingEnumerator from PersistentStringEnumerator
1cd8961: IDEA-126973 IDE does not see the use of string resources in libraries
bb0b856: @NotNull
b700084: nullable/notnull/contract intentions available on library sources in Groovy
18afce3: external/inferred annotation gutter markers in Groovy
18dcabb: [vcs] assert annotationProvider != null (otherwise the action shouldn't be enabled)
ff89749: [vcs] Handle all exceptions during annotation similarly. EA-58021
45cd16b: svn: Refactored SvnVcs.svnStatusIs() - method moved to Status class
37423bb: StringUtil: Added wordWrapString() method
228a7f0: IDEA-127144 (No word wrap or horizontal scrolling for ignore list in Proxy dialog)
a570ab1: SSR: remove unused code from replace options
e19e11f: SSR replace handler cleanup
ab42788: IDEA-125021 Improve multi-caret copy-paste logic
8662259: IDEA-127125 Throwable at com.intellij.debugger.ui.breakpoints.BreakpointWithHighlighter.reload
7c0fcae: fixed Breakpoints tests
be3789a: do not calculate source position twice
a22e159: Disable css emmet fuzzy search by default
687fa76: no need for findClass if we have object already
86d795a: ElementPatternCondition: less memory
7801b62: ObjectPattern.oneOf(T...): less memory for single value
394b9f4: GPUB: keyword completion on non-trivial offsets
d9d79ac: resource bundle editor property rename fixed: no property with old invalid name creation after rename
233bdf9: svn: Replaced SVNStatusType (from SVNKit) with custom StatusType enum
5803898: action popups are broken if new project settings dialog option is on
1b4fef2: svn: Replaced SVNEventAction (from SVNKit) with custom EventAction enum
cbcc65f: more robust lookup start offset invalidation tracking (EA-51961 etc)
4703c93: IDEA-75885 Fast invoking shortcut with Alt brings focus to main menu [Windows] IDEA-87408 Hold <ALT> button and select multiple lines vertically does not work as desired
19eb679: Add diagnostic info
40acdf7: License dialog: provide precise diagnostic for rejected key (IDEA-122894, IDEA-125916, IDEA-125917, IDEA-125921)
e28ee1b: display inferred annotations in italic
fe21a09: IDEA-81781 Unintuitive 'Project Files Changed' message box
78b8b2c: Fix emmet tests
b363ad7: notnull
ac8bf17: added comment
d61930f: revert 43b0575239264eef2d4a5299b48f3d2119d8d380
3674c5e: PY-13140 Fix several minor issues found during code review
99058ce: TableView: update only visible column sizes
4a8c1f7: fix BytecodeAnalysisTest?
6563b56: fix BytecodeAnalysisTest?
f6c970f: IDEA-126896 Gradle: Grails integration buildPlugins issues
1ee9d0e: ModuleWithDependentsScope, another minor post-review cleanup
1478698: Fast groovy tests: exclude external system integration tests from FastGroovyTestSuite (e.g. GrailsGradleImportingTest)
43b0575: IDEA-126836 Formatting adds extra blank line after class header if it ends with comment
abcc172: IDEA-127062 Bug when closing splitted editors
ee204e9: IDEA-127045 Unexpected 'step over' behaviour in debugger
a2fb7f6: DBE-60 Windows Authentication for SQL Server
79fc7d2: processing vcs changed text only if we are not processing selected one
0f34d80: Add diagnostic info
f506a15: PY-13140 Underscored names are completed inside import statement
e0ddef7: PY-13140 Top-level module names starting with underscore are not imported via wildcard import
b6eee00: fix PushedFilePropertiesUpdater registration
b9e4b83: BytecodeAnalysisIndex is only in idea
4dfa351: BytecodeAnalysisConverter is only in idea
cb79e2e: if editor has selection, reformat only selected lines, when reformat dialog is not shown. Do not take into account processChangedText flag
76ed9f7: Merge branch 'master' of git.labs.intellij.net:idea/community
c3c78c5: IDEA-93948 Code Style->XML->Other: no preview text for CDATA
d08111d: XML Colors settings: fix description text (missing line breaks)
34dcb63: SSR: remove unnecessary class
472994b: remove some dead code from SSR search dialog
fca4af3: more efficient shortening of class references in SSR
560fb1c: fix not-null assertion
e13715a: fix NPE
9665337: javascript-psi-impl shouldn't depend on lang-impl
2a91d19: fixed XBreakpointManager.testSerialize
f698722: ModuleWithDependentsScope minor post-review cleanup
9d399a3: WEB-12568 Dart autocomplete deletes closing bracket
95673d1: don't use getPackage to check if PSI directory is in source (SCL-7243)
fbbb221: bytecode analysis: copying to prevent indices/enumerators issues
831ab18: bytecode analysis: compound keys - maximizing shared stuff
0ddd915: enable inference bytecode indexing only in internal & test mode
ee888c2: fix AnnotateMethodTest
e0861b8: bytecode analysis: leaking parameters analysis
31d93dc: ProjectBytecodeAnalysis -> service
fbb2825: ignore inferred annotation in @NotNull propagation to overridden methods
1b75318: adapt testdata to inferred jdk annotations
f350b3d: hold inferred @NotNull annotation on a hold reference to avoid UsageViewTest failures
0e53e62: bytecode analysis: assume that errors are due to incorrect bytecode
affe6e0: log bytecode analysis related information with debug level
a11744c: adapt dfa testAlexBug to jdk notnull inference
f98006e: ProjectBytecodeAnalysis: use tree hierarchy to find parameter's declaring method
bfbc9e0: limiting number of elementary steps of bytecode analysis to 30000
8eee4f3: import fix
261018e: logging AnalyzerException
ce0e838: bytecode analysis: increase IDE responsiveness during large class file analysis (e.g. groovy's ArrayUtil)
3357716: bytecode analysis: use separate keys for each file to avoid O(n^2) index removeAssociatedValue
7fc5d9e: make BytecodeAnalysisTest more classloader-agnostic
bd2ebf1: cache enumerator data, remove unnecessary enumerator queries
14ed555: do postponed formatting in JavaChainLookupElement (EA-56040)
ce64501: no contract annotation if method is already @NotNull
1754869: findInferredAnnotations returns @NotNull array
3b75ca1: refresh annotations dir during setUp
6dd49cb: update annotations after mockJDK update
c5f3f23: hashCode via ordinal, copying equations into solver
90e6e0d: myAnnotations are volatile
08811b0: more logging
1e54e26: simple smoke test that inferred annotations are visible to codeInspection
0db25e9: arrays and primitive collections
658b710: aligned with faba
b993ee5: NPE fix (possible dead bytecode instructions)
8de3dfd: using TIntObjectHashMap in analysis (instead of wrappers)
0ea01ef: minimal structures for inferred annotations in memory
861fbc7: reload inferred annotations on root change
92e2f07: checking enumerators during converter initialization
338481d: Bytecode analysis tests
75eb3cb: straightforward solver
7056873: rigid infinum
6220d1a: porting faba features
ecce111: Contract clauses normalization (sorted)
cd2946b: Tuning null -> ... analysis
82e7fd8: Parameters: CNPE join CYCLE = CYCLE
ade7530: stable calls (ported from faba)
657b7af: starting integrations tests
70cc7af: qname may be null (anonymous classes)
f2e3270: smart propagation (resultOrigins analysis)
05486ff: separate indices
eb58968: a proper disposal of app component in the test
1901b35: this is NotNullValue
f8a153e: equals/hashCode for index values
6dcf377: comments + refactoring
383752b: compound indices in action
ff84337: initial implementation for compound keys
d905c85: towards compound keys
68ef299: Refactoring: moving class processing into DataIndexer
c8e0bef: towards compound ids
bacb5f5: extracting converter
655e0ba: poc implementation via indices
eac23b5: indexing
dabdf64: towards indexing
10fda95: emulating touch via special value
e1fce3d: No need for components (with touched flag) in raw equations
44ea307: testing contract annotations
a8717a4: testing hand-coded classes
fb6ce34: Start test for BytecodeAnalysis
8ec604c: using (naively yet) solutions of IntIdSolver in Idea
fc86f2b: full cycle of solving (not tested yet)
1d2c7d5: in progress: internalizing equations
c31fd4a: No need for signatures
41d2da1: poc usage of enumerators
073012d: inferred annotations in gutter
8c57905: Draft implementation of inferring Nullity and Contract annotations
dca785b: stub for inferred annotations
d83c897: @notnull
aeb0b27: allow / skip build attempt number e.g. IU-135.455.10 (IDEA-122579)
604ec3a: Add workaround for CertificateManager *service* to work with old task connectors
b96c582: Rename classes related to legacy JIRA interface to not cause confusion anymore
165465c: IDEA-123972 Display JIRA version under JQL editor field in settings
6708af2: Use simpler test API for performing actions
eb009fc: introduced expandNodesOnLoad in XDebuggerTree
1c3cefb: nice diagnostics for failed assertion about same indexing result from the same content
6b75304: IDEA-127042 Create tip of the day for quick evaluate
b8d6082: add to watches with control+enter from evaluate dialog
86d958c: for nonntrivial file systems (e.g. in tests): build todo data flag should have the same value as todoindex.isAcceptable (cherry picked from commit 5213e01)
962d04b: svn: Refactored SyntheticWorker - reused default DirectoryEntry comparison for sorting
99e9710: IDEA-107376 Goto next / previous splitter not working when show tabs are set to none (Carlos Ortiz)
0ce61f1: notnull
a0f6b0a: svn: Refactored "checking node kind" logic - use utility methods (instead of direct comparison with NodeKind enum values)
2b5e1c1: update breakpoints and execution line highlighters on color scheme change
69be4e2: svn: Refactored RepositoryBrowserDialog - removed duplication and simplified "set action enabled" logic
e438d20: Merge remote-tracking branch 'origin/master'
1cc9650: IDEA-122894 License dialog: License key: provide precise diagnostic for rejected key (cherry picked from commit ff0eada) (cherry picked from commit 29c98ba)
9241cae: Merge remote-tracking branch 'origin/master'
7d896bb: provide completion variants for Charset.forName() in Groovy (IDEA-126595)
e62b6df: IDEA-126973 IDE does not see the use of string resources in libraries
02e5ede: updated javadoc for exit and restart actions
e1d0804: WEB-12677 Emmet: do not expand too long abbreviations
6046586: Emmet: preview editor should have maximum width if softwraps are enabled
be20e87: DOC-3246 IDEA: Update master password dialog panels help
ca5260d: IDEA-79083 Popup lists are sometimes not clickable
8a05f8e: Merge remote-tracking branch 'origin/master'
be76dc1: highlight matches in plugins table
fcd53e5: save search query as a client property
fd6b2c9: add a client property key for search query
febbd75: fix inspection setting
e119956: svn: Simplified node kind values parsing - parse right to NodeKind enum (using jaxb annotations)
6979d52: do not let transient iterator escape as it references nonsharable thread locals
746de0c: diff: fix example in diff editor color settings
3eebc8b: show different icons for muted enabled/disabled breakpoints
1b1a23a: IDEA-120465 Code completion: no Autopopup code completion and inserting pair brackets if Column selection mode is on
9dde9e3: svn: Replaced SVNNodeKind (from SVNKit) with custom NodeKind enum
ed3bf70: split 'force' flag into two different ones: 'force' and 'exitConfirmed', so that now 'force==true' guarantees application shutdown; (fixes also IDEA-127009)
756bc03: performance optimization (following IDEA-CR-337)
7a94893: change storage format for smart backspace configuration (following review CR-IC-5570)
8feafdf: Updated tests due to changes in interface of CommentByLineCommentAction
5d8aeec: IDEA-127074 Executing gradle script fails with error '.../build.gradle' is not a directory
546e0ba: IDEA-88643 delete item from list of breakpoints by pressing delete key: selection should not return to position 1.
4de85a6: IDEA-122513 Can't expand single character property in Spring config
2595051: fix blinking test
51305d0: fix test (following IDEA-124928)
51c9f4a: IDEA-121060 Multiple carets: comment/uncomment works incorrectly
0ddcf6e: IDEA-121060 Multiple carets: comment/uncomment works incorrectly
d51dd50: IDEA-121060 Multiple carets: comment/uncomment works incorrectly
9307f63: svn: Save selected depth for update/integrate operations in configuration
0e44683: svn: Refactored DepthCombo - use custom renderer to display presentable depth labels (instead of separate DepthWithName class)
c20bfe7: svn: Replaced SVNDepth (from SVNKit) with custom Depth enum
0e8564a: Merge remote-tracking branch 'origin/master'
c3e78ad: Add read action.
54474d0: TestDataGuessByExistingFilesUtil: prevent thread starvation and use processor to avoid several huge String collections in the memory (IDEA-127038)
02c1b76: svn: Utilized CommitInfo when parsing "svn list" output and for DirectoryEntry implementation
ac00218: svn: Unified "lock" information parsing for info, status and browse clients (utilized Lock class for all cases)
3ea1e9e: Merge remote-tracking branch 'origin/master'
61c9bc4: highlight matches in selected search result
e3fd6eb: don't add a newline when completing with smart enter shortcut (IDEA-126726)
4029019: EA-57959 - ISE: DomInvocationHandler.getAttributeChild diagnostics
e3c7a70: don't require PerformanceWatcher unresponsiveness threshold to be divisible by unresponsiveness interval (IDEA-127038)
02b931c: diff: fix NPE
dc4f600: Merge remote-tracking branch 'origin/master'
bcc88e8: svn: Replaced SVNLock (from SVNKit) with custom Lock class
f21bc13: svn: Replaced SVNDirEntry (from SVNKit) with custom DirectoryEntry class
8434e55: svn: Fixed NPE in SvnKitBrowseClient (SVNLogClient.doList() logic) - use SVNRevision.UNDEFINED if null is passed
e6f257f: Added multi-associations in win (un)installer with the controls on the option page.
6eb7f53: svn: Replaced ISVNAnnotateHandler (from SVNKit) with custom AnnotationConsumer class
535cb8c: svn: Refactored SvnRemoteFileAnnotation - removed unused fields
56cd6a8: svn: Refactored BaseSvnFileAnnotation - utilized CommitInfo for implementation logic
6916bac: svn: Refactored BaseSvnFileAnnotation - code simplified, duplication and unused code removed, warnings fixed
79f167e: svn: Utilized CommitInfo when parsing "svn annotate" output
5585db2: svn: Replaced SVNCommitInfo (from SVNKit) with custom CommitInfo class
3b0971e: svn: Replaced SVNDiffOptions (from SVNKit) with custom DiffOptions class
58ab1b9: svn: Replaced SVNConflictVersion (from SVNKit) with custom ConflictVersion class
d287078: svn: Fixed "memory leak" error (on application close) after viewing merge source hierarchy details - correctly register dialogs for disposal
ef3bc87: svn: Refactored TreeStructureNode - made not to be generic, renamed
ff2698d: svn: Unified LogEntry and LogEntryPath with corresponding similar classes used for svn executable xml output parsing
00224be: svn: Replaced SVNLogEntryPath (from SVNKit) with custom LogEntryPath class
6a330b5: svn: Replaced SVNLogEntry (from SVNKit) with custom LogEntry class
615f0cb: svn: Refactored parsing of child xml elements collections - use @XmlElementWrapper (instead of intermediate classes)
5ac00c0: svn: Refactored SvnUtil - moved "getPathForProgress" to ProgressEvent
cef6176: svn: Refactored ProgressTracker - removed unused method parameters, make inherit ThrowableConsumer
17cbb94: svn: Replaced SVNEvent (from SVNKit) with custom ProgressEvent class
57e1c41: svn: Preserve several more used fields when converting from SVNStatus to Status
37798c0: svn: Allow @Nullable values when converting SVNKit types to corresponding svn4idea types
ea32a54: IDEA-125627 Added new conflict reason values available in svn 1.8
099870d: svn: Replaced SVNConflictAction, SVNConflictReason, SVNOperation (from SVNKit) with corresponding custom enums
6dc734f: svn: Replaced SVNTreeConflictDescription (from SVNKit) with custom TreeConflictDescription class
57db2a5: svn: Replaced SVNStatus (from SVNKit) with custom Status class
6730240: svn: Replaced SVNInfo (from SVNKit) with custom Info class
b527300: svn: Removed unnecessary IdeaSVNInfo - directly use SVNInfo instead
46fdb20: svn: Refactored UpdateClient - make methods throw SvnBindException (instead of SVNException from SVNKit)
9ccd315: svn: Refactored StatusClient - make methods throw SvnBindException (instead of SVNException from SVNKit)
bb3582d: svn: Refactored RepeatSvnActionThroughBusy - make methods throw VcsException (instead of SVNException from SVNKit)
33b3795: svn: Refactored InfoClient - make methods throw SvnBindException (instead of SVNException from SVNKit)
ab74ce3: svn: Refactored CmdInfoClient - removed duplication, methods extracted/inlined, @NotNull
29f24cf: svn: Refactored CmdInfoClient - inlined "doInfo" methods, removed duplication
17d79f7: svn: Refactored UpdateClient - removed unnecessary methods
19cc497: svn: Refactored StatusClient - removed unnecessary "doStatus" overload method (call replaced with more general overload)
faa09ef: svn: Refactored StatusClient - removed unnecessary methods
b45cfaa: svn: Refactored StatusClient - removed unnecessary "doStatus(File path, boolean remote, boolean collectParentExternals)" overload method (as always invoked with collectParentExternals = false)
ae24d84: svn: Refactored InfoClient - removed unnecessary methods
2e97768: svn: Added some todo items for working copies of 1.8 and greater formats (cleanup, nested working copies file status refresh)
7874cb3: svn: Refactored SvnCheckoutProvider - removed unused parameters, @NotNull
22b2900: svn: Refactored SvnCheckinHandlerFactory - code simplifications, warnings fixes
1ce0847: IDEA-118540 Refactored "committing changes to same repository but from working copies of different formats" detection logic, updated confirmation message
491ac7d: svn: Updated working copy format checks to be "weaker" - to correctly support working copies with greater than svn 1.8 formats

Change-Id: I25f808eb8c86e4bd21610b40ab6b6df310d8e636
Diffstat (limited to 'python/helpers')
-rw-r--r--               python/helpers/coverage/__init__.py | 48
-rw-r--r--               python/helpers/coverage/__main__.py | 5
-rw-r--r--               python/helpers/coverage/annotate.py | 15
-rw-r--r--               python/helpers/coverage/backward.py | 96
-rw-r--r--               python/helpers/coverage/bytecode.py | 74
-rw-r--r--               python/helpers/coverage/cmdline.py | 349
-rw-r--r--               python/helpers/coverage/codeunit.py | 44
-rw-r--r--               python/helpers/coverage/collector.py | 74
-rw-r--r--               python/helpers/coverage/config.py | 211
-rw-r--r--               python/helpers/coverage/control.py | 374
-rw-r--r--               python/helpers/coverage/data.py | 52
-rw-r--r--               python/helpers/coverage/debug.py | 54
-rw-r--r--               python/helpers/coverage/execfile.py | 86
-rw-r--r--               python/helpers/coverage/files.py | 220
-rw-r--r--               python/helpers/coverage/fullcoverage/encodings.py | 57
-rw-r--r--               python/helpers/coverage/html.py | 177
-rw-r--r--               python/helpers/coverage/htmlfiles/coverage_html.js | 6
-rw-r--r--               python/helpers/coverage/htmlfiles/index.html | 13
-rw-r--r--               python/helpers/coverage/htmlfiles/jquery.min.js (renamed from python/helpers/coverage/htmlfiles/jquery-1.4.3.min.js) | 0
-rwxr-xr-x[-rw-r--r--]  python/helpers/coverage/htmlfiles/keybd_closed.png | bin 177 -> 264 bytes
-rwxr-xr-x[-rw-r--r--]  python/helpers/coverage/htmlfiles/keybd_open.png | bin 175 -> 267 bytes
-rw-r--r--               python/helpers/coverage/htmlfiles/pyfile.html | 15
-rw-r--r--               python/helpers/coverage/htmlfiles/style.css | 29
-rw-r--r--               python/helpers/coverage/misc.py | 32
-rw-r--r--               python/helpers/coverage/parser.py | 442
-rw-r--r--               python/helpers/coverage/phystokens.py | 112
-rw-r--r--               python/helpers/coverage/report.py | 37
-rw-r--r--               python/helpers/coverage/results.py | 73
-rw-r--r--               python/helpers/coverage/summary.py | 31
-rw-r--r--               python/helpers/coverage/templite.py | 228
-rw-r--r--               python/helpers/coverage/tracer.c | 730
-rw-r--r--               python/helpers/coverage/tracer.pyd | bin 9728 -> 0 bytes
-rw-r--r--               python/helpers/coverage/version.py | 9
-rw-r--r--               python/helpers/coverage/xmlreport.py | 38
-rw-r--r--               python/helpers/generator3.py | 3
-rw-r--r--               python/helpers/pep8.py | 843
-rw-r--r--               python/helpers/pycharm/lettuce_runner.py | 132
-rw-r--r--               python/helpers/pycharm_generator_utils/module_redeclarator.py | 2
-rw-r--r--               python/helpers/pycharm_generator_utils/util_methods.py | 41
-rw-r--r--               python/helpers/pydev/pydevd_comm.py | 2
-rw-r--r--               python/helpers/pydev/pydevd_signature.py | 2
41 files changed, 3366 insertions, 1390 deletions
diff --git a/python/helpers/coverage/__init__.py b/python/helpers/coverage/__init__.py
index d8dbc0f6dc88..193b7a107ebd 100644
--- a/python/helpers/coverage/__init__.py
+++ b/python/helpers/coverage/__init__.py
@@ -5,19 +5,13 @@ http://nedbatchelder.com/code/coverage
"""
-__version__ = "3.5" # see detailed history in CHANGES.txt
-
-__url__ = "http://nedbatchelder.com/code/coverage"
-if max(__version__).isalpha():
- # For pre-releases, use a version-specific URL.
- __url__ += "/" + __version__
+from coverage.version import __version__, __url__
from coverage.control import coverage, process_startup
from coverage.data import CoverageData
from coverage.cmdline import main, CoverageScript
from coverage.misc import CoverageException
-
# Module-level functions. The original API to this module was based on
# functions defined directly in the module, with a singleton of the coverage()
# class. That design hampered programmability, so the current api uses
@@ -36,12 +30,34 @@ def _singleton_method(name):
called.
"""
+ # Disable pylint msg W0612, because a bunch of variables look unused, but
+ # they're accessed via locals().
+ # pylint: disable=W0612
+
def wrapper(*args, **kwargs):
"""Singleton wrapper around a coverage method."""
global _the_coverage
if not _the_coverage:
_the_coverage = coverage(auto_data=True)
return getattr(_the_coverage, name)(*args, **kwargs)
+
+ import inspect
+ meth = getattr(coverage, name)
+ args, varargs, kw, defaults = inspect.getargspec(meth)
+ argspec = inspect.formatargspec(args[1:], varargs, kw, defaults)
+ docstring = meth.__doc__
+ wrapper.__doc__ = ("""\
+ A first-use-singleton wrapper around coverage.%(name)s.
+
+ This wrapper is provided for backward compatibility with legacy code.
+ New code should use coverage.%(name)s directly.
+
+ %(name)s%(argspec)s:
+
+ %(docstring)s
+ """ % locals()
+ )
+
return wrapper
@@ -57,10 +73,26 @@ report = _singleton_method('report')
annotate = _singleton_method('annotate')
+# On Windows, we encode and decode deep enough that something goes wrong and
+# the encodings.utf_8 module is loaded and then unloaded, I don't know why.
+# Adding a reference here prevents it from being unloaded. Yuk.
+import encodings.utf_8
+
+# Because of the "from coverage.control import fooey" lines at the top of the
+# file, there's an entry for coverage.coverage in sys.modules, mapped to None.
+# This makes some inspection tools (like pydoc) unable to find the class
+# coverage.coverage. So remove that entry.
+import sys
+try:
+ del sys.modules['coverage.coverage']
+except KeyError:
+ pass
+
+
# COPYRIGHT AND LICENSE
#
# Copyright 2001 Gareth Rees. All rights reserved.
-# Copyright 2004-2010 Ned Batchelder. All rights reserved.
+# Copyright 2004-2013 Ned Batchelder. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
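
The wrapper above keeps coverage's original function-style API working while the object API stays the recommended one. A minimal sketch of the two styles, following the report/annotate wrapper pattern shown above (the measured code is yours):

    import coverage

    # Legacy function-style API: the first call builds the hidden
    # coverage(auto_data=True) singleton, later calls reuse it.
    coverage.start()
    # ... run the code to be measured ...
    coverage.stop()
    coverage.report()

    # Preferred object API:
    cov = coverage.coverage()
    cov.start()
    # ... run the code to be measured ...
    cov.stop()
    cov.report()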
diff --git a/python/helpers/coverage/__main__.py b/python/helpers/coverage/__main__.py
index af5fa9f6819f..55e0d259e04a 100644
--- a/python/helpers/coverage/__main__.py
+++ b/python/helpers/coverage/__main__.py
@@ -1,3 +1,4 @@
-"""Coverage.py's main entrypoint."""
+"""Coverage.py's main entry point."""
+import sys
from coverage.cmdline import main
-main()
+sys.exit(main())
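
main()'s return value now becomes the process exit status, so "python -m coverage" can signal failure to callers. A sketch of checking it from another process (assumes a coverage package importable by the child interpreter):

    import subprocess, sys

    status = subprocess.call(
        [sys.executable, "-m", "coverage", "report", "--fail-under", "80"]
    )
    # 0 means OK, 1 an error; 2 (FAIL_UNDER, introduced in cmdline.py
    # below) means the total coverage fell short of the requested minimum.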
diff --git a/python/helpers/coverage/annotate.py b/python/helpers/coverage/annotate.py
index a556d853cdbd..5c396784445c 100644
--- a/python/helpers/coverage/annotate.py
+++ b/python/helpers/coverage/annotate.py
@@ -2,6 +2,7 @@
import os, re
+from coverage.backward import sorted # pylint: disable=W0622
from coverage.report import Reporter
class AnnotateReporter(Reporter):
@@ -26,20 +27,20 @@ class AnnotateReporter(Reporter):
"""
- def __init__(self, coverage, ignore_errors=False):
- super(AnnotateReporter, self).__init__(coverage, ignore_errors)
+ def __init__(self, coverage, config):
+ super(AnnotateReporter, self).__init__(coverage, config)
self.directory = None
blank_re = re.compile(r"\s*(#|$)")
else_re = re.compile(r"\s*else\s*:\s*(#|$)")
- def report(self, morfs, config, directory=None):
+ def report(self, morfs, directory=None):
"""Run the report.
See `coverage.report()` for arguments.
"""
- self.report_files(self.annotate_file, morfs, config, directory)
+ self.report_files(self.annotate_file, morfs, directory)
def annotate_file(self, cu, analysis):
"""Annotate a single file.
@@ -59,9 +60,9 @@ class AnnotateReporter(Reporter):
dest_file = filename + ",cover"
dest = open(dest_file, 'w')
- statements = analysis.statements
- missing = analysis.missing
- excluded = analysis.excluded
+ statements = sorted(analysis.statements)
+ missing = sorted(analysis.missing)
+ excluded = sorted(analysis.excluded)
lineno = 0
i = 0
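
Note the reporter protocol change: config travels with the reporter from construction instead of being passed to every report() call. A hypothetical call site (cov and its config attribute are illustrative):

    reporter = AnnotateReporter(cov, cov.config)
    reporter.report(morfs=None, directory="annotated")  # no config argument now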
diff --git a/python/helpers/coverage/backward.py b/python/helpers/coverage/backward.py
index f0a34ac4ceb1..7d2685459782 100644
--- a/python/helpers/coverage/backward.py
+++ b/python/helpers/coverage/backward.py
@@ -6,7 +6,7 @@
# W0611: Unused import blah
# W0622: Redefining built-in blah
-import os, sys
+import os, re, sys
# Python 2.3 doesn't have `set`
try:
@@ -24,6 +24,31 @@ except NameError:
lst.sort()
return lst
+# Python 2.3 doesn't have `reversed`.
+try:
+ reversed = reversed
+except NameError:
+ def reversed(iterable):
+ """A 2.3-compatible implementation of `reversed`."""
+ lst = list(iterable)
+ return lst[::-1]
+
+# rpartition is new in 2.5
+try:
+ "".rpartition
+except AttributeError:
+ def rpartition(s, sep):
+ """Implement s.rpartition(sep) for old Pythons."""
+ i = s.rfind(sep)
+ if i == -1:
+ return ('', '', s)
+ else:
+ return (s[:i], sep, s[i+len(sep):])
+else:
+ def rpartition(s, sep):
+ """A common interface for new Pythons."""
+ return s.rpartition(sep)
+
# Pythons 2 and 3 differ on where to get StringIO
try:
from cStringIO import StringIO
@@ -49,6 +74,18 @@ try:
except NameError:
range = range
+# A function to iterate listlessly over a dict's items.
+try:
+ {}.iteritems
+except AttributeError:
+ def iitems(d):
+ """Produce the items from dict `d`."""
+ return d.items()
+else:
+ def iitems(d):
+ """Produce the items from dict `d`."""
+ return d.iteritems()
+
# Exec is a statement in Py2, a function in Py3
if sys.version_info >= (3, 0):
def exec_code_object(code, global_map):
@@ -66,21 +103,32 @@ else:
)
)
-# ConfigParser was renamed to the more-standard configparser
-try:
- import configparser
-except ImportError:
- import ConfigParser as configparser
-
-# Python 3.2 provides `tokenize.open`, the best way to open source files.
-try:
+# Reading Python source and interpreting the coding comment is a big deal.
+if sys.version_info >= (3, 0):
+ # Python 3.2 provides `tokenize.open`, the best way to open source files.
import tokenize
- open_source = tokenize.open # pylint: disable=E1101
-except AttributeError:
+ try:
+ open_source = tokenize.open # pylint: disable=E1101
+ except AttributeError:
+ from io import TextIOWrapper
+ detect_encoding = tokenize.detect_encoding # pylint: disable=E1101
+ # Copied from the 3.2 stdlib:
+ def open_source(fname):
+ """Open a file in read only mode using the encoding detected by
+ detect_encoding().
+ """
+ buffer = open(fname, 'rb')
+ encoding, _ = detect_encoding(buffer.readline)
+ buffer.seek(0)
+ text = TextIOWrapper(buffer, encoding, line_buffering=True)
+ text.mode = 'r'
+ return text
+else:
def open_source(fname):
"""Open a source file the best way."""
return open(fname, "rU")
+
# Python 3.x is picky about bytes and strings, so provide methods to
# get them right, and make them no-ops in 2.x
if sys.version_info >= (3, 0):
@@ -92,6 +140,19 @@ if sys.version_info >= (3, 0):
"""Convert bytes `b` to a string."""
return b.decode('utf8')
+ def binary_bytes(byte_values):
+ """Produce a byte string with the ints from `byte_values`."""
+ return bytes(byte_values)
+
+ def byte_to_int(byte_value):
+ """Turn an element of a bytes object into an int."""
+ return byte_value
+
+ def bytes_to_ints(bytes_value):
+ """Turn a bytes object into a sequence of ints."""
+ # In Py3, iterating bytes gives ints.
+ return bytes_value
+
else:
def to_bytes(s):
"""Convert string `s` to bytes (no-op in 2.x)."""
@@ -101,6 +162,19 @@ else:
"""Convert bytes `b` to a string (no-op in 2.x)."""
return b
+ def binary_bytes(byte_values):
+ """Produce a byte string with the ints from `byte_values`."""
+ return "".join([chr(b) for b in byte_values])
+
+ def byte_to_int(byte_value):
+ """Turn an element of a bytes object into an int."""
+ return ord(byte_value)
+
+ def bytes_to_ints(bytes_value):
+ """Turn a bytes object into a sequence of ints."""
+ for byte in bytes_value:
+ yield ord(byte)
+
# Md5 is available in different places.
try:
import hashlib
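
The new shims are drop-in equivalents for the modern builtins and methods, so on any supported interpreter the following sanity checks should hold (a quick sketch, not part of the patch):

    from coverage.backward import rpartition, reversed, iitems
    from coverage.backward import binary_bytes, bytes_to_ints

    assert rpartition("a.b.c", ".") == ("a.b", ".", "c")
    assert list(reversed([1, 2, 3])) == [3, 2, 1]
    assert sorted(iitems({"x": 1, "y": 2})) == [("x", 1), ("y", 2)]
    assert list(bytes_to_ints(binary_bytes([65, 66]))) == [65, 66]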
diff --git a/python/helpers/coverage/bytecode.py b/python/helpers/coverage/bytecode.py
index ab522d6c1c6c..85360638528e 100644
--- a/python/helpers/coverage/bytecode.py
+++ b/python/helpers/coverage/bytecode.py
@@ -1,14 +1,25 @@
"""Bytecode manipulation for coverage.py"""
-import opcode, sys, types
+import opcode, types
+
+from coverage.backward import byte_to_int
class ByteCode(object):
"""A single bytecode."""
def __init__(self):
+ # The offset of this bytecode in the code object.
self.offset = -1
+
+ # The opcode, defined in the `opcode` module.
self.op = -1
+
+ # The argument, a small integer, whose meaning depends on the opcode.
self.arg = -1
+
+ # The offset in the code object of the next bytecode.
self.next_offset = -1
+
+ # The offset to jump to.
self.jump_to = -1
@@ -18,44 +29,34 @@ class ByteCodes(object):
Returns `ByteCode` objects.
"""
+ # pylint: disable=R0924
def __init__(self, code):
self.code = code
- self.offset = 0
- if sys.version_info >= (3, 0):
- def __getitem__(self, i):
- return self.code[i]
- else:
- def __getitem__(self, i):
- return ord(self.code[i])
+ def __getitem__(self, i):
+ return byte_to_int(self.code[i])
def __iter__(self):
- return self
-
- def __next__(self):
- if self.offset >= len(self.code):
- raise StopIteration
+ offset = 0
+ while offset < len(self.code):
+ bc = ByteCode()
+ bc.op = self[offset]
+ bc.offset = offset
- bc = ByteCode()
- bc.op = self[self.offset]
- bc.offset = self.offset
+ next_offset = offset+1
+ if bc.op >= opcode.HAVE_ARGUMENT:
+ bc.arg = self[offset+1] + 256*self[offset+2]
+ next_offset += 2
- next_offset = self.offset+1
- if bc.op >= opcode.HAVE_ARGUMENT:
- bc.arg = self[self.offset+1] + 256*self[self.offset+2]
- next_offset += 2
+ label = -1
+ if bc.op in opcode.hasjrel:
+ label = next_offset + bc.arg
+ elif bc.op in opcode.hasjabs:
+ label = bc.arg
+ bc.jump_to = label
- label = -1
- if bc.op in opcode.hasjrel:
- label = next_offset + bc.arg
- elif bc.op in opcode.hasjabs:
- label = bc.arg
- bc.jump_to = label
-
- bc.next_offset = self.offset = next_offset
- return bc
-
- next = __next__ # Py2k uses an old-style non-dunder name.
+ bc.next_offset = offset = next_offset
+ yield bc
class CodeObjects(object):
@@ -64,18 +65,11 @@ class CodeObjects(object):
self.stack = [code]
def __iter__(self):
- return self
-
- def __next__(self):
- if self.stack:
+ while self.stack:
# We're going to return the code object on the stack, but first
# push its children for later returning.
code = self.stack.pop()
for c in code.co_consts:
if isinstance(c, types.CodeType):
self.stack.append(c)
- return code
-
- raise StopIteration
-
- next = __next__
+ yield code
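
Both classes collapse into plain generators here, so each iteration starts fresh at offset 0 instead of sharing cursor state with other consumers. A sketch of walking one function's bytecodes (assumes the fixed-width argument encoding this parser targets, i.e. interpreters contemporary with this snapshot, before wordcode):

    from coverage.bytecode import ByteCodes, CodeObjects

    def sample(x):
        return x + 1

    code = sample.__code__
    for bc in ByteCodes(code.co_code):
        print(bc.offset, bc.op, bc.arg, bc.next_offset, bc.jump_to)

    # CodeObjects yields `code` itself plus any nested code objects:
    print(len(list(CodeObjects(code))))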
diff --git a/python/helpers/coverage/cmdline.py b/python/helpers/coverage/cmdline.py
index 1ce5e0f54ad6..ea112a8b8f2d 100644
--- a/python/helpers/coverage/cmdline.py
+++ b/python/helpers/coverage/cmdline.py
@@ -1,10 +1,11 @@
"""Command-line support for Coverage."""
-import optparse, re, sys, traceback
+import optparse, os, sys, time, traceback
from coverage.backward import sorted # pylint: disable=W0622
from coverage.execfile import run_python_file, run_python_module
from coverage.misc import CoverageException, ExceptionDuringRun, NoSource
+from coverage.debug import info_formatter
class Opts(object):
@@ -19,11 +20,18 @@ class Opts(object):
'', '--branch', action='store_true',
help="Measure branch coverage in addition to statement coverage."
)
+ debug = optparse.make_option(
+ '', '--debug', action='store', metavar="OPTS",
+ help="Debug options, separated by commas"
+ )
directory = optparse.make_option(
- '-d', '--directory', action='store',
- metavar="DIR",
+ '-d', '--directory', action='store', metavar="DIR",
help="Write the output files to DIR."
)
+ fail_under = optparse.make_option(
+ '', '--fail-under', action='store', metavar="MIN", type="int",
+ help="Exit with a status of 2 if the total coverage is less than MIN."
+ )
help = optparse.make_option(
'-h', '--help', action='store_true',
help="Get help on this command."
@@ -89,6 +97,10 @@ class Opts(object):
help="Use a simpler but slower trace method. Try this if you get "
"seemingly impossible results!"
)
+ title = optparse.make_option(
+ '', '--title', action='store', metavar="TITLE",
+ help="A text string to use as the title on the HTML."
+ )
version = optparse.make_option(
'', '--version', action='store_true',
help="Display version information and exit."
@@ -110,7 +122,9 @@ class CoverageOptionParser(optparse.OptionParser, object):
self.set_defaults(
actions=[],
branch=None,
+ debug=None,
directory=None,
+ fail_under=None,
help=None,
ignore_errors=None,
include=None,
@@ -122,6 +136,7 @@ class CoverageOptionParser(optparse.OptionParser, object):
show_missing=None,
source=None,
timid=None,
+ title=None,
erase_first=None,
version=None,
)
@@ -273,9 +288,11 @@ CMDS = {
'html': CmdOptionParser("html",
[
Opts.directory,
+ Opts.fail_under,
Opts.ignore_errors,
Opts.omit,
Opts.include,
+ Opts.title,
] + GLOBAL_ARGS,
usage = "[options] [modules]",
description = "Create an HTML report of the coverage of the files. "
@@ -285,6 +302,7 @@ CMDS = {
'report': CmdOptionParser("report",
[
+ Opts.fail_under,
Opts.ignore_errors,
Opts.omit,
Opts.include,
@@ -298,6 +316,7 @@ CMDS = {
[
Opts.append,
Opts.branch,
+ Opts.debug,
Opts.pylib,
Opts.parallel_mode,
Opts.module,
@@ -314,20 +333,20 @@ CMDS = {
'xml': CmdOptionParser("xml",
[
+ Opts.fail_under,
Opts.ignore_errors,
Opts.omit,
Opts.include,
Opts.output_xml,
] + GLOBAL_ARGS,
cmd = "xml",
- defaults = {'outfile': 'coverage.xml'},
usage = "[options] [modules]",
description = "Generate an XML report of coverage results."
),
}
-OK, ERR = 0, 1
+OK, ERR, FAIL_UNDER = 0, 1, 2
class CoverageScript(object):
@@ -346,27 +365,10 @@ class CoverageScript(object):
self.run_python_file = _run_python_file or run_python_file
self.run_python_module = _run_python_module or run_python_module
self.help_fn = _help_fn or self.help
+ self.classic = False
self.coverage = None
- def help(self, error=None, topic=None, parser=None):
- """Display an error message, or the named topic."""
- assert error or topic or parser
- if error:
- print(error)
- print("Use 'coverage help' for help.")
- elif parser:
- print(parser.format_help().strip())
- else:
- # Parse out the topic we want from HELP_TOPICS
- topic_list = re.split("(?m)^=+ (\w+) =+$", HELP_TOPICS)
- topics = dict(zip(topic_list[1::2], topic_list[2::2]))
- help_msg = topics.get(topic, '').strip()
- if help_msg:
- print(help_msg % self.covpkg.__dict__)
- else:
- print("Don't know topic %r" % topic)
-
def command_line(self, argv):
"""The bulk of the command line interface to Coverage.
@@ -376,15 +378,14 @@ class CoverageScript(object):
"""
# Collect the command-line options.
-
if not argv:
self.help_fn(topic='minimum_help')
return OK
# The command syntax we parse depends on the first argument. Classic
# syntax always starts with an option.
- classic = argv[0].startswith('-')
- if classic:
+ self.classic = argv[0].startswith('-')
+ if self.classic:
parser = ClassicOptionParser()
else:
parser = CMDS.get(argv[0])
@@ -398,64 +399,19 @@ class CoverageScript(object):
if not ok:
return ERR
- # Handle help.
- if options.help:
- if classic:
- self.help_fn(topic='help')
- else:
- self.help_fn(parser=parser)
- return OK
-
- if "help" in options.actions:
- if args:
- for a in args:
- parser = CMDS.get(a)
- if parser:
- self.help_fn(parser=parser)
- else:
- self.help_fn(topic=a)
- else:
- self.help_fn(topic='help')
- return OK
-
- # Handle version.
- if options.version:
- self.help_fn(topic='version')
+ # Handle help and version.
+ if self.do_help(options, args, parser):
return OK
# Check for conflicts and problems in the options.
- for i in ['erase', 'execute']:
- for j in ['annotate', 'html', 'report', 'combine']:
- if (i in options.actions) and (j in options.actions):
- self.help_fn("You can't specify the '%s' and '%s' "
- "options at the same time." % (i, j))
- return ERR
-
- if not options.actions:
- self.help_fn(
- "You must specify at least one of -e, -x, -c, -r, -a, or -b."
- )
- return ERR
- args_allowed = (
- 'execute' in options.actions or
- 'annotate' in options.actions or
- 'html' in options.actions or
- 'debug' in options.actions or
- 'report' in options.actions or
- 'xml' in options.actions
- )
- if not args_allowed and args:
- self.help_fn("Unexpected arguments: %s" % " ".join(args))
- return ERR
-
- if 'execute' in options.actions and not args:
- self.help_fn("Nothing to do.")
+ if not self.args_ok(options, args):
return ERR
# Listify the list options.
source = unshell_list(options.source)
omit = unshell_list(options.omit)
include = unshell_list(options.include)
+ debug = unshell_list(options.debug)
# Do something.
self.coverage = self.covpkg.coverage(
@@ -467,41 +423,11 @@ class CoverageScript(object):
source = source,
omit = omit,
include = include,
+ debug = debug,
)
if 'debug' in options.actions:
- if not args:
- self.help_fn("What information would you like: data, sys?")
- return ERR
- for info in args:
- if info == 'sys':
- print("-- sys ----------------------------------------")
- for label, info in self.coverage.sysinfo():
- if info == []:
- info = "-none-"
- if isinstance(info, list):
- print("%15s:" % label)
- for e in info:
- print("%15s %s" % ("", e))
- else:
- print("%15s: %s" % (label, info))
- elif info == 'data':
- print("-- data ---------------------------------------")
- self.coverage.load()
- print("path: %s" % self.coverage.data.filename)
- print("has_arcs: %r" % self.coverage.data.has_arcs())
- summary = self.coverage.data.summary(fullpath=True)
- if summary:
- filenames = sorted(summary.keys())
- print("\n%d files:" % len(filenames))
- for f in filenames:
- print("%s: %d lines" % (f, summary[f]))
- else:
- print("No data collected")
- else:
- self.help_fn("Don't know what you mean by %r" % info)
- return ERR
- return OK
+ return self.do_debug(args)
if 'erase' in options.actions or options.erase_first:
self.coverage.erase()
@@ -509,22 +435,7 @@ class CoverageScript(object):
self.coverage.load()
if 'execute' in options.actions:
- # Run the script.
- self.coverage.start()
- code_ran = True
- try:
- try:
- if options.module:
- self.run_python_module(args[0], args)
- else:
- self.run_python_file(args[0], args)
- except NoSource:
- code_ran = False
- raise
- finally:
- if code_ran:
- self.coverage.stop()
- self.coverage.save()
+ self.do_execute(options, args)
if 'combine' in options.actions:
self.coverage.combine()
@@ -539,18 +450,167 @@ class CoverageScript(object):
)
if 'report' in options.actions:
- self.coverage.report(
+ total = self.coverage.report(
show_missing=options.show_missing, **report_args)
if 'annotate' in options.actions:
self.coverage.annotate(
directory=options.directory, **report_args)
if 'html' in options.actions:
- self.coverage.html_report(
- directory=options.directory, **report_args)
+ total = self.coverage.html_report(
+ directory=options.directory, title=options.title,
+ **report_args)
if 'xml' in options.actions:
outfile = options.outfile
- self.coverage.xml_report(outfile=outfile, **report_args)
+ total = self.coverage.xml_report(outfile=outfile, **report_args)
+ if options.fail_under is not None:
+ if total >= options.fail_under:
+ return OK
+ else:
+ return FAIL_UNDER
+ else:
+ return OK
+
+ def help(self, error=None, topic=None, parser=None):
+ """Display an error message, or the named topic."""
+ assert error or topic or parser
+ if error:
+ print(error)
+ print("Use 'coverage help' for help.")
+ elif parser:
+ print(parser.format_help().strip())
+ else:
+ help_msg = HELP_TOPICS.get(topic, '').strip()
+ if help_msg:
+ print(help_msg % self.covpkg.__dict__)
+ else:
+ print("Don't know topic %r" % topic)
+
+ def do_help(self, options, args, parser):
+ """Deal with help requests.
+
+ Return True if it handled the request, False if not.
+
+ """
+ # Handle help.
+ if options.help:
+ if self.classic:
+ self.help_fn(topic='help')
+ else:
+ self.help_fn(parser=parser)
+ return True
+
+ if "help" in options.actions:
+ if args:
+ for a in args:
+ parser = CMDS.get(a)
+ if parser:
+ self.help_fn(parser=parser)
+ else:
+ self.help_fn(topic=a)
+ else:
+ self.help_fn(topic='help')
+ return True
+
+ # Handle version.
+ if options.version:
+ self.help_fn(topic='version')
+ return True
+
+ return False
+
+ def args_ok(self, options, args):
+ """Check for conflicts and problems in the options.
+
+ Returns True if everything is ok, or False if not.
+
+ """
+ for i in ['erase', 'execute']:
+ for j in ['annotate', 'html', 'report', 'combine']:
+ if (i in options.actions) and (j in options.actions):
+ self.help_fn("You can't specify the '%s' and '%s' "
+ "options at the same time." % (i, j))
+ return False
+
+ if not options.actions:
+ self.help_fn(
+ "You must specify at least one of -e, -x, -c, -r, -a, or -b."
+ )
+ return False
+ args_allowed = (
+ 'execute' in options.actions or
+ 'annotate' in options.actions or
+ 'html' in options.actions or
+ 'debug' in options.actions or
+ 'report' in options.actions or
+ 'xml' in options.actions
+ )
+ if not args_allowed and args:
+ self.help_fn("Unexpected arguments: %s" % " ".join(args))
+ return False
+
+ if 'execute' in options.actions and not args:
+ self.help_fn("Nothing to do.")
+ return False
+
+ return True
+
+ def do_execute(self, options, args):
+ """Implementation of 'coverage run'."""
+
+ # Set the first path element properly.
+ old_path0 = sys.path[0]
+
+ # Run the script.
+ self.coverage.start()
+ code_ran = True
+ try:
+ try:
+ if options.module:
+ sys.path[0] = ''
+ self.run_python_module(args[0], args)
+ else:
+ filename = args[0]
+ sys.path[0] = os.path.abspath(os.path.dirname(filename))
+ self.run_python_file(filename, args)
+ except NoSource:
+ code_ran = False
+ raise
+ finally:
+ self.coverage.stop()
+ if code_ran:
+ self.coverage.save()
+
+ # Restore the old path
+ sys.path[0] = old_path0
+
+ def do_debug(self, args):
+ """Implementation of 'coverage debug'."""
+
+ if not args:
+ self.help_fn("What information would you like: data, sys?")
+ return ERR
+ for info in args:
+ if info == 'sys':
+ print("-- sys ----------------------------------------")
+ for line in info_formatter(self.coverage.sysinfo()):
+ print(" %s" % line)
+ elif info == 'data':
+ print("-- data ---------------------------------------")
+ self.coverage.load()
+ print("path: %s" % self.coverage.data.filename)
+ print("has_arcs: %r" % self.coverage.data.has_arcs())
+ summary = self.coverage.data.summary(fullpath=True)
+ if summary:
+ filenames = sorted(summary.keys())
+ print("\n%d files:" % len(filenames))
+ for f in filenames:
+ print("%s: %d lines" % (f, summary[f]))
+ else:
+ print("No data collected")
+ else:
+ self.help_fn("Don't know what you mean by %r" % info)
+ return ERR
return OK
@@ -568,10 +628,10 @@ def unshell_list(s):
return s.split(',')
-HELP_TOPICS = r"""
-
-== classic ====================================================================
-Coverage.py version %(__version__)s
+HELP_TOPICS = {
+# -------------------------
+'classic':
+r"""Coverage.py version %(__version__)s
Measure, collect, and report on code coverage in Python programs.
Usage:
@@ -615,8 +675,9 @@ coverage -a [-d DIR] [-i] [-o DIR,...] [FILE1 FILE2 ...]
Coverage data is saved in the file .coverage by default. Set the
COVERAGE_FILE environment variable to save it somewhere else.
-
-== help =======================================================================
+""",
+# -------------------------
+'help': """\
Coverage.py, version %(__version__)s
Measure, collect, and report on code coverage in Python programs.
@@ -635,26 +696,32 @@ Commands:
Use "coverage help <command>" for detailed help on any command.
Use "coverage help classic" for help on older command syntax.
For more information, see %(__url__)s
-
-== minimum_help ===============================================================
+""",
+# -------------------------
+'minimum_help': """\
Code coverage for Python. Use 'coverage help' for help.
-
-== version ====================================================================
+""",
+# -------------------------
+'version': """\
Coverage.py, version %(__version__)s. %(__url__)s
-
-"""
+""",
+}
def main(argv=None):
- """The main entrypoint to Coverage.
+ """The main entry point to Coverage.
- This is installed as the script entrypoint.
+ This is installed as the script entry point.
"""
if argv is None:
argv = sys.argv[1:]
try:
+ start = time.clock()
status = CoverageScript().command_line(argv)
+ end = time.clock()
+ if 0:
+ print("time: %.3fs" % (end - start))
except ExceptionDuringRun:
# An exception was caught while running the product code. The
# sys.exc_info() return tuple is packed into an ExceptionDuringRun
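
The refactor splits command_line() into do_help/args_ok/do_execute/do_debug and adds the FAIL_UNDER exit status. A programmatic sketch (assumes data has already been collected into .coverage):

    from coverage.cmdline import CoverageScript, FAIL_UNDER

    status = CoverageScript().command_line(["report", "--fail-under", "95"])
    if status == FAIL_UNDER:
        print("total coverage is below 95%")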
diff --git a/python/helpers/coverage/codeunit.py b/python/helpers/coverage/codeunit.py
index 55f44a240acf..ca1ae5c56d6b 100644
--- a/python/helpers/coverage/codeunit.py
+++ b/python/helpers/coverage/codeunit.py
@@ -52,8 +52,10 @@ class CodeUnit(object):
else:
f = morf
# .pyc files should always refer to a .py instead.
- if f.endswith('.pyc'):
+ if f.endswith('.pyc') or f.endswith('.pyo'):
f = f[:-1]
+ elif f.endswith('$py.class'): # Jython
+ f = f[:-9] + ".py"
self.filename = self.file_locator.canonical_filename(f)
if hasattr(morf, '__name__'):
@@ -77,12 +79,18 @@ class CodeUnit(object):
# Annoying comparison operators. Py3k wants __lt__ etc, and Py2k needs all
# of them defined.
- def __lt__(self, other): return self.name < other.name
- def __le__(self, other): return self.name <= other.name
- def __eq__(self, other): return self.name == other.name
- def __ne__(self, other): return self.name != other.name
- def __gt__(self, other): return self.name > other.name
- def __ge__(self, other): return self.name >= other.name
+ def __lt__(self, other):
+ return self.name < other.name
+ def __le__(self, other):
+ return self.name <= other.name
+ def __eq__(self, other):
+ return self.name == other.name
+ def __ne__(self, other):
+ return self.name != other.name
+ def __gt__(self, other):
+ return self.name > other.name
+ def __ge__(self, other):
+ return self.name >= other.name
def flat_rootname(self):
"""A base for a flat filename to correspond to this code unit.
@@ -113,5 +121,25 @@ class CodeUnit(object):
# Couldn't find source.
raise CoverageException(
- "No source for code %r." % self.filename
+ "No source for code '%s'." % self.filename
)
+
+ def should_be_python(self):
+ """Does it seem like this file should contain Python?
+
+ This is used to decide if a file reported as part of the execution of
+ a program was really likely to have contained Python in the first
+ place.
+
+ """
+ # Get the file extension.
+ _, ext = os.path.splitext(self.filename)
+
+ # Anything named *.py* should be Python.
+ if ext.startswith('.py'):
+ return True
+ # A file with no extension should be Python.
+ if not ext:
+ return True
+ # Everything else is probably not Python.
+ return False
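
The compiled-file mapping added above, restated standalone (logic copied from the hunk; the function name is illustrative):

    def source_filename(f):
        # .pyc/.pyo files should refer to the .py source instead.
        if f.endswith('.pyc') or f.endswith('.pyo'):
            return f[:-1]
        elif f.endswith('$py.class'):   # Jython's compiled form
            return f[:-9] + '.py'
        return f

    assert source_filename('pkg/mod.pyo') == 'pkg/mod.py'
    assert source_filename('pkg/mod$py.class') == 'pkg/mod.py'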
diff --git a/python/helpers/coverage/collector.py b/python/helpers/coverage/collector.py
index 9c40d16c7b19..8ba7d87cd4e0 100644
--- a/python/helpers/coverage/collector.py
+++ b/python/helpers/coverage/collector.py
@@ -1,13 +1,24 @@
"""Raw data collector for Coverage."""
-import sys, threading
+import os, sys, threading
try:
# Use the C extension code when we can, for speed.
- from coverage.tracer import Tracer
+ from coverage.tracer import CTracer # pylint: disable=F0401,E0611
except ImportError:
# Couldn't import the C extension, maybe it isn't built.
- Tracer = None
+ if os.getenv('COVERAGE_TEST_TRACER') == 'c':
+ # During testing, we use the COVERAGE_TEST_TRACER env var to indicate
+ # that we've fiddled with the environment to test this fallback code.
+ # If we thought we had a C tracer, but couldn't import it, then exit
+ # quickly and clearly instead of dribbling confusing errors. I'm using
+ # sys.exit here instead of an exception because an exception here
+ # causes all sorts of other noise in unittest.
+ sys.stderr.write(
+ "*** COVERAGE_TEST_TRACER is 'c' but can't import CTracer!\n"
+ )
+ sys.exit(1)
+ CTracer = None
class PyTracer(object):
@@ -40,12 +51,19 @@ class PyTracer(object):
self.last_exc_back = None
self.last_exc_firstlineno = 0
self.arcs = False
+ self.thread = None
+ self.stopped = False
def _trace(self, frame, event, arg_unused):
"""The trace function passed to sys.settrace."""
- #print("trace event: %s %r @%d" % (
- # event, frame.f_code.co_filename, frame.f_lineno))
+ if self.stopped:
+ return
+
+ if 0:
+ sys.stderr.write("trace event: %s %r @%d\n" % (
+ event, frame.f_code.co_filename, frame.f_lineno
+ ))
if self.last_exc_back:
if frame == self.last_exc_back:
@@ -61,10 +79,11 @@ class PyTracer(object):
# in this file.
self.data_stack.append((self.cur_file_data, self.last_line))
filename = frame.f_code.co_filename
- tracename = self.should_trace_cache.get(filename)
- if tracename is None:
+ if filename not in self.should_trace_cache:
tracename = self.should_trace(filename, frame)
self.should_trace_cache[filename] = tracename
+ else:
+ tracename = self.should_trace_cache[filename]
#print("called, stack is %d deep, tracename is %r" % (
# len(self.data_stack), tracename))
if tracename:
@@ -105,15 +124,24 @@ class PyTracer(object):
Return a Python function suitable for use with sys.settrace().
"""
+ self.thread = threading.currentThread()
sys.settrace(self._trace)
return self._trace
def stop(self):
"""Stop this Tracer."""
+ self.stopped = True
+ if self.thread != threading.currentThread():
+ # Called on a different thread than the one that started us: we can't
+ # unhook ourselves, but we've set the flag that we should stop, so we
+ # won't do any more tracing.
+ return
+
if hasattr(sys, "gettrace") and self.warn:
if sys.gettrace() != self._trace:
msg = "Trace function changed, measurement is likely wrong: %r"
- self.warn(msg % sys.gettrace())
+ self.warn(msg % (sys.gettrace(),))
+ #print("Stopping tracer on %s" % threading.current_thread().ident)
sys.settrace(None)
def get_stats(self):
@@ -146,7 +174,7 @@ class Collector(object):
"""Create a collector.
`should_trace` is a function, taking a filename, and returning a
- canonicalized filename, or False depending on whether the file should
+ canonicalized filename, or None depending on whether the file should
be traced or not.
If `timid` is true, then a slower simpler trace function will be
@@ -173,7 +201,7 @@ class Collector(object):
else:
# Being fast: use the C Tracer if it is available, else the Python
# trace function.
- self._trace_class = Tracer or PyTracer
+ self._trace_class = CTracer or PyTracer
def __repr__(self):
return "<Collector at 0x%x>" % id(self)
@@ -190,7 +218,7 @@ class Collector(object):
# A cache of the results from should_trace, the decision about whether
# to trace execution in a file. A dict of filename to (filename or
- # False).
+ # None).
self.should_trace_cache = {}
# Our active Tracers.
@@ -232,9 +260,29 @@ class Collector(object):
if self._collectors:
self._collectors[-1].pause()
self._collectors.append(self)
- #print >>sys.stderr, "Started: %r" % self._collectors
+ #print("Started: %r" % self._collectors, file=sys.stderr)
+
+ # Check to see whether we had a fullcoverage tracer installed.
+ traces0 = []
+ if hasattr(sys, "gettrace"):
+ fn0 = sys.gettrace()
+ if fn0:
+ tracer0 = getattr(fn0, '__self__', None)
+ if tracer0:
+ traces0 = getattr(tracer0, 'traces', [])
+
# Install the tracer on this thread.
- self._start_tracer()
+ fn = self._start_tracer()
+
+ for args in traces0:
+ (frame, event, arg), lineno = args
+ try:
+ fn(frame, event, arg, lineno=lineno)
+ except TypeError:
+ raise Exception(
+ "fullcoverage must be run with the C trace function."
+ )
+
# Install our installation tracer in threading, to jump start other
# threads.
threading.settrace(self._installation_trace)
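
The new stopped flag matters because stop() may run on a different thread than the one that called start(): sys.settrace() only affects the calling thread, so a remote thread can only raise the flag and let the trace function go quiet on its own. A toy illustration of that pattern (not coverage's API):

    import sys, threading

    class FlagTracer(object):
        def __init__(self):
            self.stopped = False
            self.thread = None

        def _trace(self, frame, event, arg):
            if self.stopped:
                return None          # flagged from another thread: stop tracing
            return self._trace

        def start(self):
            self.thread = threading.current_thread()
            sys.settrace(self._trace)

        def stop(self):
            self.stopped = True
            if self.thread is threading.current_thread():
                sys.settrace(None)   # we can only unhook our own thread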
diff --git a/python/helpers/coverage/config.py b/python/helpers/coverage/config.py
index 6b441ddc45fc..87318ff12452 100644
--- a/python/helpers/coverage/config.py
+++ b/python/helpers/coverage/config.py
@@ -1,7 +1,76 @@
"""Config file for coverage.py"""
-import os
-from coverage.backward import configparser # pylint: disable=W0622
+import os, re, sys
+from coverage.backward import string_class, iitems
+
+# In py3, ConfigParser was renamed to the more-standard configparser
+try:
+ import configparser # pylint: disable=F0401
+except ImportError:
+ import ConfigParser as configparser
+
+
+class HandyConfigParser(configparser.RawConfigParser):
+ """Our specialization of ConfigParser."""
+
+ def read(self, filename):
+ """Read a filename as UTF-8 configuration data."""
+ kwargs = {}
+ if sys.version_info >= (3, 2):
+ kwargs['encoding'] = "utf-8"
+ return configparser.RawConfigParser.read(self, filename, **kwargs)
+
+ def get(self, *args, **kwargs):
+ v = configparser.RawConfigParser.get(self, *args, **kwargs)
+ def dollar_replace(m):
+ """Called for each $replacement."""
+ # Only one of the groups will have matched, just get its text.
+ word = [w for w in m.groups() if w is not None][0]
+ if word == "$":
+ return "$"
+ else:
+ return os.environ.get(word, '')
+
+ dollar_pattern = r"""(?x) # Use extended regex syntax
+ \$(?: # A dollar sign, then
+ (?P<v1>\w+) | # a plain word,
+ {(?P<v2>\w+)} | # or a {-wrapped word,
+ (?P<char>[$]) # or a dollar sign.
+ )
+ """
+ v = re.sub(dollar_pattern, dollar_replace, v)
+ return v
+
+ def getlist(self, section, option):
+ """Read a list of strings.
+
+ The value of `section` and `option` is treated as a comma- and newline-
+ separated list of strings. Each value is stripped of whitespace.
+
+ Returns the list of strings.
+
+ """
+ value_list = self.get(section, option)
+ values = []
+ for value_line in value_list.split('\n'):
+ for value in value_line.split(','):
+ value = value.strip()
+ if value:
+ values.append(value)
+ return values
+
+ def getlinelist(self, section, option):
+ """Read a list of full-line strings.
+
+ The value of `section` and `option` is treated as a newline-separated
+ list of strings. Each value is stripped of whitespace.
+
+ Returns the list of strings.
+
+ """
+ value_list = self.get(section, option)
+ return list(filter(None, value_list.split('\n')))
+
# The default line exclusion regexes
DEFAULT_EXCLUDE = [
@@ -29,9 +98,12 @@ class CoverageConfig(object):
operation of coverage.py.
"""
-
def __init__(self):
"""Initialize the configuration attributes to their defaults."""
+ # Metadata about the config.
+ self.attempted_config_files = []
+ self.config_files = []
+
# Defaults for [run]
self.branch = False
self.cover_pylib = False
@@ -39,6 +111,7 @@ class CoverageConfig(object):
self.parallel = False
self.timid = False
self.source = None
+ self.debug = []
# Defaults for [report]
self.exclude_list = DEFAULT_EXCLUDE[:]
@@ -48,13 +121,19 @@ class CoverageConfig(object):
self.partial_list = DEFAULT_PARTIAL[:]
self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:]
self.precision = 0
+ self.show_missing = False
# Defaults for [html]
self.html_dir = "htmlcov"
+ self.extra_css = None
+ self.html_title = "Coverage report"
# Defaults for [xml]
self.xml_output = "coverage.xml"
+ # Defaults for [paths]
+ self.paths = {}
+
def from_environment(self, env_var):
"""Read configuration from the `env_var` environment variable."""
# Timidity: for nose users, read an environment variable. This is a
@@ -64,93 +143,71 @@ class CoverageConfig(object):
if env:
self.timid = ('--timid' in env)
+ MUST_BE_LIST = ["omit", "include", "debug"]
+
def from_args(self, **kwargs):
"""Read config values from `kwargs`."""
- for k, v in kwargs.items():
+ for k, v in iitems(kwargs):
if v is not None:
+ if k in self.MUST_BE_LIST and isinstance(v, string_class):
+ v = [v]
setattr(self, k, v)
- def from_file(self, *files):
- """Read configuration from .rc files.
+ def from_file(self, filename):
+ """Read configuration from a .rc file.
- Each argument in `files` is a file name to read.
+ `filename` is a file name to read.
"""
- cp = configparser.RawConfigParser()
- cp.read(files)
+ self.attempted_config_files.append(filename)
+ cp = HandyConfigParser()
+ files_read = cp.read(filename)
+ if files_read is not None: # return value changed in 2.4
+ self.config_files.extend(files_read)
+
+ for option_spec in self.CONFIG_FILE_OPTIONS:
+ self.set_attr_from_config_option(cp, *option_spec)
+
+ # [paths] is special
+ if cp.has_section('paths'):
+ for option in cp.options('paths'):
+ self.paths[option] = cp.getlist('paths', option)
+
+ CONFIG_FILE_OPTIONS = [
# [run]
- if cp.has_option('run', 'branch'):
- self.branch = cp.getboolean('run', 'branch')
- if cp.has_option('run', 'cover_pylib'):
- self.cover_pylib = cp.getboolean('run', 'cover_pylib')
- if cp.has_option('run', 'data_file'):
- self.data_file = cp.get('run', 'data_file')
- if cp.has_option('run', 'include'):
- self.include = self.get_list(cp, 'run', 'include')
- if cp.has_option('run', 'omit'):
- self.omit = self.get_list(cp, 'run', 'omit')
- if cp.has_option('run', 'parallel'):
- self.parallel = cp.getboolean('run', 'parallel')
- if cp.has_option('run', 'source'):
- self.source = self.get_list(cp, 'run', 'source')
- if cp.has_option('run', 'timid'):
- self.timid = cp.getboolean('run', 'timid')
+ ('branch', 'run:branch', 'boolean'),
+ ('cover_pylib', 'run:cover_pylib', 'boolean'),
+ ('data_file', 'run:data_file'),
+ ('debug', 'run:debug', 'list'),
+ ('include', 'run:include', 'list'),
+ ('omit', 'run:omit', 'list'),
+ ('parallel', 'run:parallel', 'boolean'),
+ ('source', 'run:source', 'list'),
+ ('timid', 'run:timid', 'boolean'),
# [report]
- if cp.has_option('report', 'exclude_lines'):
- self.exclude_list = \
- self.get_line_list(cp, 'report', 'exclude_lines')
- if cp.has_option('report', 'ignore_errors'):
- self.ignore_errors = cp.getboolean('report', 'ignore_errors')
- if cp.has_option('report', 'include'):
- self.include = self.get_list(cp, 'report', 'include')
- if cp.has_option('report', 'omit'):
- self.omit = self.get_list(cp, 'report', 'omit')
- if cp.has_option('report', 'partial_branches'):
- self.partial_list = \
- self.get_line_list(cp, 'report', 'partial_branches')
- if cp.has_option('report', 'partial_branches_always'):
- self.partial_always_list = \
- self.get_line_list(cp, 'report', 'partial_branches_always')
- if cp.has_option('report', 'precision'):
- self.precision = cp.getint('report', 'precision')
+ ('exclude_list', 'report:exclude_lines', 'linelist'),
+ ('ignore_errors', 'report:ignore_errors', 'boolean'),
+ ('include', 'report:include', 'list'),
+ ('omit', 'report:omit', 'list'),
+ ('partial_list', 'report:partial_branches', 'linelist'),
+ ('partial_always_list', 'report:partial_branches_always', 'linelist'),
+ ('precision', 'report:precision', 'int'),
+ ('show_missing', 'report:show_missing', 'boolean'),
# [html]
- if cp.has_option('html', 'directory'):
- self.html_dir = cp.get('html', 'directory')
+ ('html_dir', 'html:directory'),
+ ('extra_css', 'html:extra_css'),
+ ('html_title', 'html:title'),
# [xml]
- if cp.has_option('xml', 'output'):
- self.xml_output = cp.get('xml', 'output')
-
- def get_list(self, cp, section, option):
- """Read a list of strings from the ConfigParser `cp`.
-
- The value of `section` and `option` is treated as a comma- and newline-
- separated list of strings. Each value is stripped of whitespace.
-
- Returns the list of strings.
-
- """
- value_list = cp.get(section, option)
- values = []
- for value_line in value_list.split('\n'):
- for value in value_line.split(','):
- value = value.strip()
- if value:
- values.append(value)
- return values
-
- def get_line_list(self, cp, section, option):
- """Read a list of full-line strings from the ConfigParser `cp`.
-
- The value of `section` and `option` is treated as a newline-separated
- list of strings. Each value is stripped of whitespace.
-
- Returns the list of strings.
-
- """
- value_list = cp.get(section, option)
- return list(filter(None, value_list.split('\n')))
-
+ ('xml_output', 'xml:output'),
+ ]
+
+ def set_attr_from_config_option(self, cp, attr, where, type_=''):
+ """Set an attribute on self if it exists in the ConfigParser."""
+ section, option = where.split(":")
+ if cp.has_option(section, option):
+ method = getattr(cp, 'get'+type_)
+ setattr(self, attr, method(section, option))
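
The table-driven CONFIG_FILE_OPTIONS above replaces a long chain of has_option checks, and HandyConfigParser.get() adds $-substitution from the environment. A sketch of the substitution behavior (COV_DEMO is an illustrative variable):

    import os
    from coverage.config import HandyConfigParser

    os.environ["COV_DEMO"] = "/tmp"
    cp = HandyConfigParser()
    cp.add_section("run")
    cp.set("run", "data_file", "$COV_DEMO/.coverage")
    cp.set("run", "note", "costs $$5")

    print(cp.get("run", "data_file"))   # -> /tmp/.coverage
    print(cp.get("run", "note"))        # -> costs $5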
diff --git a/python/helpers/coverage/control.py b/python/helpers/coverage/control.py
index 5ca1ef9538cc..f75a3dda5b1b 100644
--- a/python/helpers/coverage/control.py
+++ b/python/helpers/coverage/control.py
@@ -3,21 +3,31 @@
import atexit, os, random, socket, sys
from coverage.annotate import AnnotateReporter
-from coverage.backward import string_class
+from coverage.backward import string_class, iitems, sorted # pylint: disable=W0622
from coverage.codeunit import code_unit_factory, CodeUnit
from coverage.collector import Collector
from coverage.config import CoverageConfig
from coverage.data import CoverageData
+from coverage.debug import DebugControl
from coverage.files import FileLocator, TreeMatcher, FnmatchMatcher
-from coverage.files import find_python_files
+from coverage.files import PathAliases, find_python_files, prep_patterns
from coverage.html import HtmlReporter
from coverage.misc import CoverageException, bool_or_none, join_regex
+from coverage.misc import file_be_gone
from coverage.results import Analysis, Numbers
from coverage.summary import SummaryReporter
from coverage.xmlreport import XmlReporter
+# PyPy has some unusual stuff in the "stdlib". Consider those locations
+# when deciding where the stdlib is.
+try:
+ import _structseq # pylint: disable=F0401
+except ImportError:
+ _structseq = None
+
+
class coverage(object):
- """Programmatic access to Coverage.
+ """Programmatic access to coverage.py.
To use::
@@ -25,14 +35,15 @@ class coverage(object):
cov = coverage()
cov.start()
- #.. blah blah (run your code) blah blah ..
+ #.. call your code ..
cov.stop()
cov.html_report(directory='covhtml')
"""
def __init__(self, data_file=None, data_suffix=None, cover_pylib=None,
auto_data=False, timid=None, branch=None, config_file=True,
- source=None, omit=None, include=None):
+ source=None, omit=None, include=None, debug=None,
+ debug_file=None):
"""
`data_file` is the base name of the data file to use, defaulting to
".coverage". `data_suffix` is appended (with a dot) to `data_file` to
@@ -67,6 +78,10 @@ class coverage(object):
`include` will be measured, files that match `omit` will not. Each
will also accept a single string argument.
+ `debug` is a list of strings indicating what debugging information is
+ desired. `debug_file` is the file to write debug messages to,
+ defaulting to stderr.
+
"""
from coverage import __version__
@@ -96,18 +111,16 @@ class coverage(object):
self.config.data_file = env_data_file
# 4: from constructor arguments:
- if isinstance(omit, string_class):
- omit = [omit]
- if isinstance(include, string_class):
- include = [include]
self.config.from_args(
data_file=data_file, cover_pylib=cover_pylib, timid=timid,
branch=branch, parallel=bool_or_none(data_suffix),
- source=source, omit=omit, include=include
+ source=source, omit=omit, include=include, debug=debug,
)
+ # Create and configure the debugging controller.
+ self.debug = DebugControl(self.config.debug, debug_file or sys.stderr)
+
self.auto_data = auto_data
- self.atexit_registered = False
# _exclude_re is a dict mapping exclusion list names to compiled
# regexes.
@@ -125,8 +138,8 @@ class coverage(object):
else:
self.source_pkgs.append(src)
- self.omit = self._prep_patterns(self.config.omit)
- self.include = self._prep_patterns(self.config.include)
+ self.omit = prep_patterns(self.config.omit)
+ self.include = prep_patterns(self.config.include)
self.collector = Collector(
self._should_trace, timid=self.config.timid,
@@ -151,7 +164,8 @@ class coverage(object):
# started rather than wherever the process eventually chdir'd to.
self.data = CoverageData(
basename=self.config.data_file,
- collector="coverage v%s" % __version__
+ collector="coverage v%s" % __version__,
+ debug=self.debug,
)
# The dirs for files considered "installed with the interpreter".
@@ -162,9 +176,9 @@ class coverage(object):
# environments (virtualenv, for example), these modules may be
# spread across a few locations. Look at all the candidate modules
# we've imported, and take all the different ones.
- for m in (atexit, os, random, socket):
- if hasattr(m, "__file__"):
- m_dir = self._canonical_dir(m.__file__)
+ for m in (atexit, os, random, socket, _structseq):
+ if m is not None and hasattr(m, "__file__"):
+ m_dir = self._canonical_dir(m)
if m_dir not in self.pylib_dirs:
self.pylib_dirs.append(m_dir)
@@ -172,63 +186,61 @@ class coverage(object):
# where we are.
self.cover_dir = self._canonical_dir(__file__)
- # The matchers for _should_trace, created when tracing starts.
+ # The matchers for _should_trace.
self.source_match = None
self.pylib_match = self.cover_match = None
self.include_match = self.omit_match = None
- # Only _harvest_data once per measurement cycle.
- self._harvested = False
-
# Set the reporting precision.
Numbers.set_precision(self.config.precision)
- # When tearing down the coverage object, modules can become None.
- # Saving the modules as object attributes avoids problems, but it is
- # quite ad-hoc which modules need to be saved and which references
- # need to use the object attributes.
- self.socket = socket
- self.os = os
- self.random = random
+ # Is it ok for no data to be collected?
+ self._warn_no_data = True
+ self._warn_unimported_source = True
+
+ # State machine variables:
+ # Have we started collecting and not stopped it?
+ self._started = False
+ # Have we measured some data and not harvested it?
+ self._measured = False
- def _canonical_dir(self, f):
- """Return the canonical directory of the file `f`."""
- return os.path.split(self.file_locator.canonical_filename(f))[0]
+ atexit.register(self._atexit)
+
+ def _canonical_dir(self, morf):
+ """Return the canonical directory of the module or file `morf`."""
+ return os.path.split(CodeUnit(morf, self.file_locator).filename)[0]
def _source_for_file(self, filename):
"""Return the source file for `filename`."""
if not filename.endswith(".py"):
if filename[-4:-1] == ".py":
filename = filename[:-1]
+ elif filename.endswith("$py.class"): # jython
+ filename = filename[:-9] + ".py"
return filename
- def _should_trace(self, filename, frame):
- """Decide whether to trace execution in `filename`
+ def _should_trace_with_reason(self, filename, frame):
+ """Decide whether to trace execution in `filename`, with a reason.
This function is called from the trace function. As each new file name
is encountered, this function determines whether it is traced or not.
- Returns a canonicalized filename if it should be traced, False if it
- should not.
+ Returns a pair of values: the first indicates whether the file should
+ be traced: it's a canonicalized filename if it should be traced, None
+ if it should not. The second value is a string, the reason for the
+ decision.
"""
- if os is None:
- return False
+ if not filename:
+ # Empty string is pretty useless
+ return None, "empty string isn't a filename"
if filename.startswith('<'):
# Lots of non-file execution is represented with artificial
# filenames like "<string>", "<doctest readme.txt[0]>", or
# "<exec_function>". Don't ever trace these executions, since we
# can't do anything with the data later anyway.
- return False
-
- if filename.endswith(".html"):
- # Jinja and maybe other templating systems compile templates into
- # Python code, but use the template filename as the filename in
- # the compiled code. Of course, those filenames are useless later
- # so don't bother collecting. TODO: How should we really separate
- # out good file extensions from bad?
- return False
+ return None, "not a real filename"
self._check_for_packages()
@@ -248,64 +260,53 @@ class coverage(object):
canonical = self.file_locator.canonical_filename(filename)
- # If the user specified source, then that's authoritative about what to
- # measure. If they didn't, then we have to exclude the stdlib and
- # coverage.py directories.
+ # If the user specified source or include, then that's authoritative
+ # about the outer bound of what to measure and we don't have to apply
+ # any canned exclusions. If they didn't, then we have to exclude the
+ # stdlib and coverage.py directories.
if self.source_match:
if not self.source_match.match(canonical):
- return False
+ return None, "falls outside the --source trees"
+ elif self.include_match:
+ if not self.include_match.match(canonical):
+ return None, "falls outside the --include trees"
else:
# If we aren't supposed to trace installed code, then check if this
# is near the Python standard library and skip it if so.
if self.pylib_match and self.pylib_match.match(canonical):
- return False
+ return None, "is in the stdlib"
# We exclude the coverage code itself, since a little of it will be
# measured otherwise.
if self.cover_match and self.cover_match.match(canonical):
- return False
+ return None, "is part of coverage.py"
- # Check the file against the include and omit patterns.
- if self.include_match and not self.include_match.match(canonical):
- return False
+ # Check the file against the omit pattern.
if self.omit_match and self.omit_match.match(canonical):
- return False
+ return None, "is inside an --omit pattern"
- return canonical
+ return canonical, "because we love you"
+
+ def _should_trace(self, filename, frame):
+ """Decide whether to trace execution in `filename`.
- # To log what should_trace returns, change this to "if 1:"
- if 0:
- _real_should_trace = _should_trace
- def _should_trace(self, filename, frame): # pylint: disable=E0102
- """A logging decorator around the real _should_trace function."""
- ret = self._real_should_trace(filename, frame)
- print("should_trace: %r -> %r" % (filename, ret))
- return ret
+ Calls `_should_trace_with_reason`, and returns just the decision.
+
+ """
+ canonical, reason = self._should_trace_with_reason(filename, frame)
+ if self.debug.should('trace'):
+ if not canonical:
+ msg = "Not tracing %r: %s" % (filename, reason)
+ else:
+ msg = "Tracing %r" % (filename,)
+ self.debug.write(msg)
+ return canonical
def _warn(self, msg):
"""Use `msg` as a warning."""
self._warnings.append(msg)
sys.stderr.write("Coverage.py warning: %s\n" % msg)
- def _prep_patterns(self, patterns):
- """Prepare the file patterns for use in a `FnmatchMatcher`.
-
- If a pattern starts with a wildcard, it is used as a pattern
- as-is. If it does not start with a wildcard, then it is made
- absolute with the current directory.
-
- If `patterns` is None, an empty list is returned.
-
- """
- patterns = patterns or []
- prepped = []
- for p in patterns or []:
- if p.startswith("*") or p.startswith("?"):
- prepped.append(p)
- else:
- prepped.append(self.file_locator.abs_file(p))
- return prepped
-
def _check_for_packages(self):
"""Update the source_match matcher with latest imported packages."""
# Our self.source_pkgs attribute is a list of package names we want to
@@ -325,17 +326,23 @@ class coverage(object):
try:
pkg_file = mod.__file__
except AttributeError:
- self._warn("Module %s has no Python source." % pkg)
+ pkg_file = None
else:
d, f = os.path.split(pkg_file)
- if f.startswith('__init__.'):
+ if f.startswith('__init__'):
# This is actually a package, return the directory.
pkg_file = d
else:
pkg_file = self._source_for_file(pkg_file)
pkg_file = self.file_locator.canonical_filename(pkg_file)
+ if not os.path.exists(pkg_file):
+ pkg_file = None
+
+ if pkg_file:
self.source.append(pkg_file)
self.source_match.add(pkg_file)
+ else:
+ self._warn("Module %s has no Python source." % pkg)
for pkg in found:
self.source_pkgs.remove(pkg)
@@ -354,17 +361,21 @@ class coverage(object):
self.data.read()
def start(self):
- """Start measuring code coverage."""
+ """Start measuring code coverage.
+
+ Coverage measurement actually occurs in functions called after `start`
+ is invoked. Statements in the same scope as `start` won't be measured.
+
+ Once you invoke `start`, you must also call `stop` eventually, or your
+ process might not shut down cleanly.
+
+ """
if self.run_suffix:
# Calling start() means we're running code, so use the run_suffix
# as the data_suffix when we eventually save the data.
self.data_suffix = self.run_suffix
if self.auto_data:
self.load()
- # Save coverage data when Python exits.
- if not self.atexit_registered:
- atexit.register(self.save)
- self.atexit_registered = True
# Create the matchers we need for _should_trace
if self.source or self.source_pkgs:
@@ -379,13 +390,31 @@ class coverage(object):
if self.omit:
self.omit_match = FnmatchMatcher(self.omit)
- self._harvested = False
+ # The user may want to debug things, show info if desired.
+ if self.debug.should('config'):
+ self.debug.write("Configuration values:")
+ config_info = sorted(self.config.__dict__.items())
+ self.debug.write_formatted_info(config_info)
+
+ if self.debug.should('sys'):
+ self.debug.write("Debugging info:")
+ self.debug.write_formatted_info(self.sysinfo())
+
self.collector.start()
+ self._started = True
+ self._measured = True
def stop(self):
"""Stop measuring code coverage."""
+ self._started = False
self.collector.stop()
- self._harvest_data()
+
+ def _atexit(self):
+ """Clean up on process shutdown."""
+ if self._started:
+ self.stop()
+ if self.auto_data:
+ self.save()
def erase(self):
"""Erase previously-collected coverage data.
@@ -449,9 +478,15 @@ class coverage(object):
# plenty of distinguishing information. We do this here in
# `save()` at the last minute so that the pid will be correct even
# if the process forks.
- data_suffix = "%s.%s.%06d" % (
- self.socket.gethostname(), self.os.getpid(),
- self.random.randint(0, 99999)
+ extra = ""
+ if _TEST_NAME_FILE:
+ f = open(_TEST_NAME_FILE)
+ test_name = f.read()
+ f.close()
+ extra = "." + test_name
+ data_suffix = "%s%s.%s.%06d" % (
+ socket.gethostname(), extra, os.getpid(),
+ random.randint(0, 999999)
)
self._harvest_data()
@@ -465,7 +500,14 @@ class coverage(object):
current measurements.
"""
- self.data.combine_parallel_data()
+ aliases = None
+ if self.config.paths:
+ aliases = PathAliases(self.file_locator)
+ for paths in self.config.paths.values():
+ result = paths[0]
+ for pattern in paths[1:]:
+ aliases.add(pattern, result)
+ self.data.combine_parallel_data(aliases=aliases)
def _harvest_data(self):
"""Get the collected data and reset the collector.
@@ -473,27 +515,37 @@ class coverage(object):
Also warn about various problems collecting data.
"""
- if not self._harvested:
- self.data.add_line_data(self.collector.get_line_data())
- self.data.add_arc_data(self.collector.get_arc_data())
- self.collector.reset()
+ if not self._measured:
+ return
- # If there are still entries in the source_pkgs list, then we never
- # encountered those packages.
+ self.data.add_line_data(self.collector.get_line_data())
+ self.data.add_arc_data(self.collector.get_arc_data())
+ self.collector.reset()
+
+ # If there are still entries in the source_pkgs list, then we never
+ # encountered those packages.
+ if self._warn_unimported_source:
for pkg in self.source_pkgs:
self._warn("Module %s was never imported." % pkg)
- # Find out if we got any data.
- summary = self.data.summary()
- if not summary:
- self._warn("No data was collected.")
+ # Find out if we got any data.
+ summary = self.data.summary()
+ if not summary and self._warn_no_data:
+ self._warn("No data was collected.")
+
+ # Find files that were never executed at all.
+ for src in self.source:
+ for py_file in find_python_files(src):
+ py_file = self.file_locator.canonical_filename(py_file)
+
+ if self.omit_match and self.omit_match.match(py_file):
+ # Turns out this file was omitted, so don't pull it back
+ # in as unexecuted.
+ continue
- # Find files that were never executed at all.
- for src in self.source:
- for py_file in find_python_files(src):
- self.data.touch_file(py_file)
+ self.data.touch_file(py_file)
- self._harvested = True
+ self._measured = False
# Backward compatibility with version 1.
def analysis(self, morf):
@@ -520,8 +572,11 @@ class coverage(object):
"""
analysis = self._analyze(morf)
return (
- analysis.filename, analysis.statements, analysis.excluded,
- analysis.missing, analysis.missing_formatted()
+ analysis.filename,
+ sorted(analysis.statements),
+ sorted(analysis.excluded),
+ sorted(analysis.missing),
+ analysis.missing_formatted(),
)
def _analyze(self, it):
@@ -530,6 +585,7 @@ class coverage(object):
Returns an `Analysis` object.
"""
+ self._harvest_data()
if not isinstance(it, CodeUnit):
it = code_unit_factory(it, self.file_locator)[0]
@@ -548,14 +604,16 @@ class coverage(object):
match those patterns will be included in the report. Modules matching
`omit` will not be included in the report.
+ Returns a float, the total percentage covered.
+
"""
+ self._harvest_data()
self.config.from_args(
- ignore_errors=ignore_errors, omit=omit, include=include
- )
- reporter = SummaryReporter(
- self, show_missing, self.config.ignore_errors
+ ignore_errors=ignore_errors, omit=omit, include=include,
+ show_missing=show_missing,
)
- reporter.report(morfs, outfile=file, config=self.config)
+ reporter = SummaryReporter(self, self.config)
+ return reporter.report(morfs, outfile=file)
def annotate(self, morfs=None, directory=None, ignore_errors=None,
omit=None, include=None):
@@ -569,25 +627,39 @@ class coverage(object):
See `coverage.report()` for other arguments.
"""
+ self._harvest_data()
self.config.from_args(
ignore_errors=ignore_errors, omit=omit, include=include
)
- reporter = AnnotateReporter(self, self.config.ignore_errors)
- reporter.report(morfs, config=self.config, directory=directory)
+ reporter = AnnotateReporter(self, self.config)
+ reporter.report(morfs, directory=directory)
def html_report(self, morfs=None, directory=None, ignore_errors=None,
- omit=None, include=None):
+ omit=None, include=None, extra_css=None, title=None):
"""Generate an HTML report.
+ The HTML is written to `directory`. The file "index.html" is the
+ overview starting point, with links to more detailed pages for
+ individual modules.
+
+ `extra_css` is a path to a file of other CSS to apply on the page.
+ It will be copied into the HTML directory.
+
+ `title` is a text string (not HTML) to use as the title of the HTML
+ report.
+
See `coverage.report()` for other arguments.
+ Returns a float, the total percentage covered.
+
"""
+ self._harvest_data()
self.config.from_args(
ignore_errors=ignore_errors, omit=omit, include=include,
- html_dir=directory,
+ html_dir=directory, extra_css=extra_css, html_title=title,
)
- reporter = HtmlReporter(self, self.config.ignore_errors)
- reporter.report(morfs, config=self.config)
+ reporter = HtmlReporter(self, self.config)
+ return reporter.report(morfs)
def xml_report(self, morfs=None, outfile=None, ignore_errors=None,
omit=None, include=None):
@@ -600,12 +672,16 @@ class coverage(object):
See `coverage.report()` for other arguments.
+ Returns a float, the total percentage covered.
+
"""
+ self._harvest_data()
self.config.from_args(
ignore_errors=ignore_errors, omit=omit, include=include,
xml_output=outfile,
)
file_to_close = None
+ delete_file = False
if self.config.xml_output:
if self.config.xml_output == '-':
outfile = sys.stdout
@@ -613,11 +689,17 @@ class coverage(object):
outfile = open(self.config.xml_output, "w")
file_to_close = outfile
try:
- reporter = XmlReporter(self, self.config.ignore_errors)
- reporter.report(morfs, outfile=outfile, config=self.config)
+ try:
+ reporter = XmlReporter(self, self.config)
+ return reporter.report(morfs, outfile=outfile)
+ except CoverageException:
+ delete_file = True
+ raise
finally:
if file_to_close:
file_to_close.close()
+ if delete_file:
+ file_be_gone(self.config.xml_output)
def sysinfo(self):
"""Return a list of (key, value) pairs showing internal information."""
@@ -625,22 +707,43 @@ class coverage(object):
import coverage as covmod
import platform, re
+ try:
+ implementation = platform.python_implementation()
+ except AttributeError:
+ implementation = "unknown"
+
info = [
('version', covmod.__version__),
('coverage', covmod.__file__),
('cover_dir', self.cover_dir),
('pylib_dirs', self.pylib_dirs),
('tracer', self.collector.tracer_name()),
+ ('config_files', self.config.attempted_config_files),
+ ('configs_read', self.config.config_files),
('data_path', self.data.filename),
('python', sys.version.replace('\n', '')),
('platform', platform.platform()),
+ ('implementation', implementation),
+ ('executable', sys.executable),
('cwd', os.getcwd()),
('path', sys.path),
- ('environment', [
- ("%s = %s" % (k, v)) for k, v in os.environ.items()
- if re.search("^COV|^PY", k)
- ]),
+ ('environment', sorted([
+ ("%s = %s" % (k, v)) for k, v in iitems(os.environ)
+ if re.search(r"^COV|^PY", k)
+ ])),
+ ('command_line', " ".join(getattr(sys, 'argv', ['???']))),
]
+ if self.source_match:
+ info.append(('source_match', self.source_match.info()))
+ if self.include_match:
+ info.append(('include_match', self.include_match.info()))
+ if self.omit_match:
+ info.append(('omit_match', self.omit_match.info()))
+ if self.cover_match:
+ info.append(('cover_match', self.cover_match.info()))
+ if self.pylib_match:
+ info.append(('pylib_match', self.pylib_match.info()))
+
return info
@@ -667,7 +770,10 @@ def process_startup():
cps = os.environ.get("COVERAGE_PROCESS_START")
if cps:
cov = coverage(config_file=cps, auto_data=True)
- if os.environ.get("COVERAGE_COVERAGE"):
- # Measuring coverage within coverage.py takes yet more trickery.
- cov.cover_dir = "Please measure coverage.py!"
cov.start()
+ cov._warn_no_data = False
+ cov._warn_unimported_source = False
+
+
+# A hack for debugging tests that run in subprocesses.
+_TEST_NAME_FILE = "" #"/tmp/covtest.txt"
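
The usual wiring for `process_startup()` is a `sitecustomize.py` (or .pth hook)
importable by every measured subprocess, plus the environment variable. A
sketch, assuming `process_startup` is re-exported at package level as in
upstream coverage.py, and with an example config path:

    # sitecustomize.py, on the child interpreter's path:
    import coverage
    coverage.process_startup()

    # In the parent environment (example path):
    #   COVERAGE_PROCESS_START=/path/to/.coveragerc
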
diff --git a/python/helpers/coverage/data.py b/python/helpers/coverage/data.py
index 3263cb38883b..fb88c5b1e638 100644
--- a/python/helpers/coverage/data.py
+++ b/python/helpers/coverage/data.py
@@ -2,7 +2,9 @@
import os
-from coverage.backward import pickle, sorted # pylint: disable=W0622
+from coverage.backward import iitems, pickle, sorted # pylint: disable=W0622
+from coverage.files import PathAliases
+from coverage.misc import file_be_gone
class CoverageData(object):
@@ -21,15 +23,18 @@ class CoverageData(object):
"""
- def __init__(self, basename=None, collector=None):
+ def __init__(self, basename=None, collector=None, debug=None):
"""Create a CoverageData.
`basename` is the name of the file to use for storing data.
`collector` is a string describing the coverage measurement software.
+ `debug` is a `DebugControl` object for writing debug messages.
+
"""
self.collector = collector or 'unknown'
+ self.debug = debug
self.use_file = True
@@ -59,10 +64,6 @@ class CoverageData(object):
#
self.arcs = {}
- self.os = os
- self.sorted = sorted
- self.pickle = pickle
-
def usefile(self, use_file=True):
"""Set whether or not to use a disk file for data."""
self.use_file = use_file
@@ -92,21 +93,21 @@ class CoverageData(object):
def erase(self):
"""Erase the data, both in this object, and from its file storage."""
if self.use_file:
- if self.filename and os.path.exists(self.filename):
- os.remove(self.filename)
+ if self.filename:
+ file_be_gone(self.filename)
self.lines = {}
self.arcs = {}
def line_data(self):
"""Return the map from filenames to lists of line numbers executed."""
return dict(
- [(f, self.sorted(lmap.keys())) for f, lmap in self.lines.items()]
+ [(f, sorted(lmap.keys())) for f, lmap in iitems(self.lines)]
)
def arc_data(self):
"""Return the map from filenames to lists of line number pairs."""
return dict(
- [(f, self.sorted(amap.keys())) for f, amap in self.arcs.items()]
+ [(f, sorted(amap.keys())) for f, amap in iitems(self.arcs)]
)
def write_file(self, filename):
@@ -123,10 +124,13 @@ class CoverageData(object):
if self.collector:
data['collector'] = self.collector
+ if self.debug and self.debug.should('dataio'):
+ self.debug.write("Writing data to %r" % (filename,))
+
# Write the pickle to the file.
fdata = open(filename, 'wb')
try:
- self.pickle.dump(data, fdata, 2)
+ pickle.dump(data, fdata, 2)
finally:
fdata.close()
@@ -136,6 +140,8 @@ class CoverageData(object):
def raw_data(self, filename):
"""Return the raw pickled data from `filename`."""
+ if self.debug and self.debug.should('dataio'):
+ self.debug.write("Reading data from %r" % (filename,))
fdata = open(filename, 'rb')
try:
data = pickle.load(fdata)
@@ -158,33 +164,39 @@ class CoverageData(object):
# Unpack the 'lines' item.
lines = dict([
(f, dict.fromkeys(linenos, None))
- for f, linenos in data.get('lines', {}).items()
+ for f, linenos in iitems(data.get('lines', {}))
])
# Unpack the 'arcs' item.
arcs = dict([
(f, dict.fromkeys(arcpairs, None))
- for f, arcpairs in data.get('arcs', {}).items()
+ for f, arcpairs in iitems(data.get('arcs', {}))
])
except Exception:
pass
return lines, arcs
- def combine_parallel_data(self):
+ def combine_parallel_data(self, aliases=None):
"""Combine a number of data files together.
Treat `self.filename` as a file prefix, and combine the data from all
of the data files starting with that prefix plus a dot.
+ If `aliases` is provided, it's a `PathAliases` object that is used to
+ re-map paths to match the local machine's.
+
"""
+ aliases = aliases or PathAliases()
data_dir, local = os.path.split(self.filename)
localdot = local + '.'
for f in os.listdir(data_dir or '.'):
if f.startswith(localdot):
full_path = os.path.join(data_dir, f)
new_lines, new_arcs = self._read_file(full_path)
- for filename, file_data in new_lines.items():
+ for filename, file_data in iitems(new_lines):
+ filename = aliases.map(filename)
self.lines.setdefault(filename, {}).update(file_data)
- for filename, file_data in new_arcs.items():
+ for filename, file_data in iitems(new_arcs):
+ filename = aliases.map(filename)
self.arcs.setdefault(filename, {}).update(file_data)
if f != local:
os.remove(full_path)
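
A sketch of combining data recorded on another machine, remapping its checkout
root onto the local one (paths are examples, and the usual `read()`/`write()`
methods on CoverageData are assumed):

    from coverage.data import CoverageData
    from coverage.files import PathAliases

    aliases = PathAliases()
    aliases.add("/ci/build/proj/src", "./src")   # remote root -> local root

    data = CoverageData(basename=".coverage")
    data.read()
    data.combine_parallel_data(aliases=aliases)
    data.write()
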
@@ -195,7 +207,7 @@ class CoverageData(object):
`line_data` is { filename: { lineno: None, ... }, ...}
"""
- for filename, linenos in line_data.items():
+ for filename, linenos in iitems(line_data):
self.lines.setdefault(filename, {}).update(linenos)
def add_arc_data(self, arc_data):
@@ -204,7 +216,7 @@ class CoverageData(object):
`arc_data` is { filename: { (l1,l2): None, ... }, ...}
"""
- for filename, arcs in arc_data.items():
+ for filename, arcs in iitems(arc_data):
self.arcs.setdefault(filename, {}).update(arcs)
def touch_file(self, filename):
@@ -245,8 +257,8 @@ class CoverageData(object):
if fullpath:
filename_fn = lambda f: f
else:
- filename_fn = self.os.path.basename
- for filename, lines in self.lines.items():
+ filename_fn = os.path.basename
+ for filename, lines in iitems(self.lines):
summ[filename_fn(filename)] = len(lines)
return summ
diff --git a/python/helpers/coverage/debug.py b/python/helpers/coverage/debug.py
new file mode 100644
index 000000000000..104f3b1d0a43
--- /dev/null
+++ b/python/helpers/coverage/debug.py
@@ -0,0 +1,54 @@
+"""Control of and utilities for debugging."""
+
+import os
+
+
+# When debugging, it can be helpful to force some options, especially when
+# debugging the configuration mechanisms you usually use to control debugging!
+# This is a list of forced debugging options.
+FORCED_DEBUG = []
+
+
+class DebugControl(object):
+ """Control and output for debugging."""
+
+ def __init__(self, options, output):
+ """Configure the options and output file for debugging."""
+ self.options = options
+ self.output = output
+
+ def should(self, option):
+ """Decide whether to output debug information in category `option`."""
+ return (option in self.options or option in FORCED_DEBUG)
+
+ def write(self, msg):
+ """Write a line of debug output."""
+ if self.should('pid'):
+ msg = "pid %5d: %s" % (os.getpid(), msg)
+ self.output.write(msg+"\n")
+ self.output.flush()
+
+ def write_formatted_info(self, info):
+ """Write a sequence of (label,data) pairs nicely."""
+ for line in info_formatter(info):
+ self.write(" %s" % line)
+
+
+def info_formatter(info):
+ """Produce a sequence of formatted lines from info.
+
+ `info` is a sequence of pairs (label, data). The produced lines are
+ nicely formatted, ready to print.
+
+ """
+ label_len = max([len(l) for l, _d in info])
+ for label, data in info:
+ if data == []:
+ data = "-none-"
+ if isinstance(data, (list, tuple)):
+ prefix = "%*s:" % (label_len, label)
+ for e in data:
+ yield "%*s %s" % (label_len+1, prefix, e)
+ prefix = ""
+ else:
+ yield "%*s: %s" % (label_len, label, data)
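
A sketch of how these pieces fit together, feeding a coverage object's
`sysinfo()` pairs through the formatter ('dataio' and 'pid' are example
debug options):

    import sys
    from coverage import coverage
    from coverage.debug import DebugControl

    cov = coverage()
    debug = DebugControl(options=["dataio", "pid"], output=sys.stderr)
    if debug.should("dataio"):
        debug.write_formatted_info(cov.sysinfo())
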
diff --git a/python/helpers/coverage/execfile.py b/python/helpers/coverage/execfile.py
index 71227b715a3e..f6ebdf79bb9e 100644
--- a/python/helpers/coverage/execfile.py
+++ b/python/helpers/coverage/execfile.py
@@ -1,9 +1,9 @@
"""Execute files of Python code."""
-import imp, os, sys
+import imp, marshal, os, sys
from coverage.backward import exec_code_object, open_source
-from coverage.misc import NoSource, ExceptionDuringRun
+from coverage.misc import ExceptionDuringRun, NoCode, NoSource
try:
@@ -65,6 +65,8 @@ def run_python_module(modulename, args):
openfile.close()
# Finally, hand the file off to run_python_file for execution.
+ pathname = os.path.abspath(pathname)
+ args[0] = pathname
run_python_file(pathname, args, package=packagename)
@@ -82,34 +84,22 @@ def run_python_file(filename, args, package=None):
main_mod = imp.new_module('__main__')
sys.modules['__main__'] = main_mod
main_mod.__file__ = filename
- main_mod.__package__ = package
+ if package:
+ main_mod.__package__ = package
main_mod.__builtins__ = BUILTINS
- # Set sys.argv and the first path element properly.
+ # Set sys.argv properly.
old_argv = sys.argv
- old_path0 = sys.path[0]
sys.argv = args
- sys.path[0] = os.path.abspath(os.path.dirname(filename))
try:
- # Open the source file.
- try:
- source_file = open_source(filename)
- except IOError:
- raise NoSource("No file to run: %r" % filename)
+ # Make a code object somehow.
+ if filename.endswith(".pyc") or filename.endswith(".pyo"):
+ code = make_code_from_pyc(filename)
+ else:
+ code = make_code_from_py(filename)
- try:
- source = source_file.read()
- finally:
- source_file.close()
-
- # We have the source. `compile` still needs the last line to be clean,
- # so make sure it is, then compile a code object from it.
- if source[-1] != '\n':
- source += '\n'
- code = compile(source, filename, "exec")
-
- # Execute the source file.
+ # Execute the code object.
try:
exec_code_object(code, main_mod.__dict__)
except SystemExit:
@@ -130,4 +120,52 @@ def run_python_file(filename, args, package=None):
# Restore the old argv and path
sys.argv = old_argv
- sys.path[0] = old_path0
+
+def make_code_from_py(filename):
+ """Get source from `filename` and make a code object of it."""
+ # Open the source file.
+ try:
+ source_file = open_source(filename)
+ except IOError:
+ raise NoSource("No file to run: %r" % filename)
+
+ try:
+ source = source_file.read()
+ finally:
+ source_file.close()
+
+ # We have the source. `compile` still needs the last line to be clean,
+ # so make sure it is, then compile a code object from it.
+ if not source or source[-1] != '\n':
+ source += '\n'
+ code = compile(source, filename, "exec")
+
+ return code
+
+
+def make_code_from_pyc(filename):
+ """Get a code object from a .pyc file."""
+ try:
+ fpyc = open(filename, "rb")
+ except IOError:
+ raise NoCode("No file to run: %r" % filename)
+
+ try:
+ # First four bytes are a version-specific magic number. It has to
+ # match or we won't run the file.
+ magic = fpyc.read(4)
+ if magic != imp.get_magic():
+ raise NoCode("Bad magic number in .pyc file")
+
+ # Skip the junk in the header that we don't need.
+ fpyc.read(4) # Skip the moddate.
+ if sys.version_info >= (3, 3):
+ # 3.3 added another long to the header (size), skip it.
+ fpyc.read(4)
+
+ # The rest of the file is the code object we want.
+ code = marshal.load(fpyc)
+ finally:
+ fpyc.close()
+
+ return code
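
A usage sketch (the file name is a placeholder; the .pyc must have been
compiled by the same interpreter, or `NoCode` is raised):

    from coverage.backward import exec_code_object
    from coverage.execfile import make_code_from_pyc

    code = make_code_from_pyc("utils.pyc")          # placeholder path
    exec_code_object(code, {"__name__": "__main__"})
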
diff --git a/python/helpers/coverage/files.py b/python/helpers/coverage/files.py
index a68a0a7fef88..464535a81653 100644
--- a/python/helpers/coverage/files.py
+++ b/python/helpers/coverage/files.py
@@ -1,23 +1,21 @@
"""File wrangling."""
from coverage.backward import to_string
-import fnmatch, os, sys
+from coverage.misc import CoverageException
+import fnmatch, os, os.path, re, sys
+import ntpath, posixpath
class FileLocator(object):
"""Understand how filenames work."""
def __init__(self):
# The absolute path to our current directory.
- self.relative_dir = self.abs_file(os.curdir) + os.sep
+ self.relative_dir = os.path.normcase(abs_file(os.curdir) + os.sep)
# Cache of results of calling the canonical_filename() method, to
# avoid duplicating work.
self.canonical_filename_cache = {}
- def abs_file(self, filename):
- """Return the absolute normalized form of `filename`."""
- return os.path.normcase(os.path.abspath(os.path.realpath(filename)))
-
def relative_filename(self, filename):
"""Return the relative form of `filename`.
@@ -25,8 +23,9 @@ class FileLocator(object):
`FileLocator` was constructed.
"""
- if filename.startswith(self.relative_dir):
- filename = filename.replace(self.relative_dir, "")
+ fnorm = os.path.normcase(filename)
+ if fnorm.startswith(self.relative_dir):
+ filename = filename[len(self.relative_dir):]
return filename
def canonical_filename(self, filename):
@@ -36,19 +35,15 @@ class FileLocator(object):
"""
if filename not in self.canonical_filename_cache:
- f = filename
- if os.path.isabs(f) and not os.path.exists(f):
- if self.get_zip_data(f) is None:
- f = os.path.basename(f)
- if not os.path.isabs(f):
+ if not os.path.isabs(filename):
for path in [os.curdir] + sys.path:
if path is None:
continue
- g = os.path.join(path, f)
- if os.path.exists(g):
- f = g
+ f = os.path.join(path, filename)
+ if os.path.exists(f):
+ filename = f
break
- cf = self.abs_file(f)
+ cf = abs_file(filename)
self.canonical_filename_cache[filename] = cf
return self.canonical_filename_cache[filename]
@@ -77,6 +72,78 @@ class FileLocator(object):
return None
+if sys.platform == 'win32':
+
+ def actual_path(path):
+ """Get the actual path of `path`, including the correct case."""
+ if path in actual_path.cache:
+ return actual_path.cache[path]
+
+ head, tail = os.path.split(path)
+ if not tail:
+ actpath = head
+ elif not head:
+ actpath = tail
+ else:
+ head = actual_path(head)
+ if head in actual_path.list_cache:
+ files = actual_path.list_cache[head]
+ else:
+ try:
+ files = os.listdir(head)
+ except OSError:
+ files = []
+ actual_path.list_cache[head] = files
+ normtail = os.path.normcase(tail)
+ for f in files:
+ if os.path.normcase(f) == normtail:
+ tail = f
+ break
+ actpath = os.path.join(head, tail)
+ actual_path.cache[path] = actpath
+ return actpath
+
+ actual_path.cache = {}
+ actual_path.list_cache = {}
+
+else:
+ def actual_path(filename):
+ """The actual path for non-Windows platforms."""
+ return filename
+
+
+def abs_file(filename):
+ """Return the absolute normalized form of `filename`."""
+ path = os.path.expandvars(os.path.expanduser(filename))
+ path = os.path.abspath(os.path.realpath(path))
+ path = actual_path(path)
+ return path
+
+
+def isabs_anywhere(filename):
+ """Is `filename` an absolute path on any OS?"""
+ return ntpath.isabs(filename) or posixpath.isabs(filename)
+
+
+def prep_patterns(patterns):
+ """Prepare the file patterns for use in a `FnmatchMatcher`.
+
+ If a pattern starts with a wildcard, it is used as a pattern
+ as-is. If it does not start with a wildcard, then it is made
+ absolute with the current directory.
+
+ If `patterns` is None, an empty list is returned.
+
+ """
+ prepped = []
+ for p in patterns or []:
+ if p.startswith("*") or p.startswith("?"):
+ prepped.append(p)
+ else:
+ prepped.append(abs_file(p))
+ return prepped
+
+
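For example, with the current directory assumed to be /work/proj (the exact
output depends on the machine, since abs_file also resolves symlinks):

    from coverage.files import prep_patterns

    print(prep_patterns(["*/settings.py", "tests/*"]))
    # -> ['*/settings.py', '/work/proj/tests/*']   (second made absolute)
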
class TreeMatcher(object):
"""A matcher for files in a tree."""
def __init__(self, directories):
@@ -85,6 +152,10 @@ class TreeMatcher(object):
def __repr__(self):
return "<TreeMatcher %r>" % self.dirs
+ def info(self):
+ """A list of strings for displaying when dumping state."""
+ return self.dirs
+
def add(self, directory):
"""Add another directory to the list we match for."""
self.dirs.append(directory)
@@ -110,6 +181,10 @@ class FnmatchMatcher(object):
def __repr__(self):
return "<FnmatchMatcher %r>" % self.pats
+ def info(self):
+ """A list of strings for displaying when dumping state."""
+ return self.pats
+
def match(self, fpath):
"""Does `fpath` match one of our filename patterns?"""
for pat in self.pats:
@@ -118,14 +193,117 @@ class FnmatchMatcher(object):
return False
+def sep(s):
+ """Find the path separator used in this string, or os.sep if none."""
+ sep_match = re.search(r"[\\/]", s)
+ if sep_match:
+ the_sep = sep_match.group(0)
+ else:
+ the_sep = os.sep
+ return the_sep
+
+
+class PathAliases(object):
+ """A collection of aliases for paths.
+
+ When combining data files from remote machines, often the paths to source
+ code are different, for example, due to OS differences, or because of
+ serialized checkouts on continuous integration machines.
+
+ A `PathAliases` object tracks a list of pattern/result pairs, and can
+ map a path through those aliases to produce a unified path.
+
+ `locator` is a FileLocator that is used to canonicalize the results.
+
+ """
+ def __init__(self, locator=None):
+ self.aliases = []
+ self.locator = locator
+
+ def add(self, pattern, result):
+ """Add the `pattern`/`result` pair to the list of aliases.
+
+ `pattern` is an `fnmatch`-style pattern. `result` is a simple
+ string. When mapping paths, if a path starts with a match against
+ `pattern`, then that match is replaced with `result`. This models
+ isomorphic source trees being rooted at different places on two
+ different machines.
+
+ `pattern` can't end with a wildcard component, since that would
+ match an entire tree, and not just its root.
+
+ """
+ # The pattern can't end with a wildcard component.
+ pattern = pattern.rstrip(r"\/")
+ if pattern.endswith("*"):
+ raise CoverageException("Pattern must not end with wildcards.")
+ pattern_sep = sep(pattern)
+
+ # The pattern is meant to match a filepath. Let's make it absolute
+ # unless it already is, or is meant to match any prefix.
+ if not pattern.startswith('*') and not isabs_anywhere(pattern):
+ pattern = abs_file(pattern)
+ pattern += pattern_sep
+
+ # Make a regex from the pattern. fnmatch always adds a \Z or $ to
+ # match the whole string, which we don't want.
+ regex_pat = fnmatch.translate(pattern).replace(r'\Z(', '(')
+ if regex_pat.endswith("$"):
+ regex_pat = regex_pat[:-1]
+ # We want */a/b.py to match on Windows too, so change slash to match
+ # either separator.
+ regex_pat = regex_pat.replace(r"\/", r"[\\/]")
+ # We want case-insensitive matching, so add that flag.
+ regex = re.compile(r"(?i)" + regex_pat)
+
+ # Normalize the result: it must end with a path separator.
+ result_sep = sep(result)
+ result = result.rstrip(r"\/") + result_sep
+ self.aliases.append((regex, result, pattern_sep, result_sep))
+
+ def map(self, path):
+ """Map `path` through the aliases.
+
+ `path` is checked against all of the patterns. The first pattern to
+ match is used to replace the root of the path with the result root.
+ Only one pattern is ever used. If no patterns match, `path` is
+ returned unchanged.
+
+ The separator style in the result is made to match that of the result
+ in the alias.
+
+ """
+ for regex, result, pattern_sep, result_sep in self.aliases:
+ m = regex.match(path)
+ if m:
+ new = path.replace(m.group(0), result)
+ if pattern_sep != result_sep:
+ new = new.replace(pattern_sep, result_sep)
+ if self.locator:
+ new = self.locator.canonical_filename(new)
+ return new
+ return path
+
+
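A sketch of the mapping in action (the roots are examples): matching is
case-insensitive and accepts either separator, and the result adopts the
result root's separator style:

    from coverage.files import PathAliases

    aliases = PathAliases()
    aliases.add(r"c:\build\src", "/home/ci/proj/src")
    print(aliases.map(r"C:\build\SRC\pkg\mod.py"))
    # -> /home/ci/proj/src/pkg/mod.py
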
def find_python_files(dirname):
- """Yield all of the importable Python files in `dirname`, recursively."""
- for dirpath, dirnames, filenames in os.walk(dirname, topdown=True):
- if '__init__.py' not in filenames:
+ """Yield all of the importable Python files in `dirname`, recursively.
+
+ To be importable, the files have to be in a directory with an __init__.py,
+ except for `dirname` itself, which isn't required to have one. The
+ assumption is that `dirname` was specified directly, so the user knows
+ best, but subdirectories are checked for an __init__.py to be sure we only
+ find the importable files.
+
+ """
+ for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)):
+ if i > 0 and '__init__.py' not in filenames:
# If a directory doesn't have __init__.py, then it isn't
# importable and neither are its files
del dirnames[:]
continue
for filename in filenames:
- if fnmatch.fnmatch(filename, "*.py"):
+ # We're only interested in files that look like reasonable Python
+ # files: they must end with .py or .pyw, and must not have certain funny
+ # characters that probably mean they are editor junk.
+ if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename):
yield os.path.join(dirpath, filename)
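
A usage sketch ("src" is an example directory):

    from coverage.files import find_python_files

    # Subdirectories without an __init__.py are pruned; names containing
    # editor-junk characters are skipped.
    for py_file in find_python_files("src"):
        print(py_file)
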
diff --git a/python/helpers/coverage/fullcoverage/encodings.py b/python/helpers/coverage/fullcoverage/encodings.py
new file mode 100644
index 000000000000..6a258d6710c4
--- /dev/null
+++ b/python/helpers/coverage/fullcoverage/encodings.py
@@ -0,0 +1,57 @@
+"""Imposter encodings module that installs a coverage-style tracer.
+
+This is NOT the encodings module; it is an imposter that sets up tracing
+instrumentation and then replaces itself with the real encodings module.
+
+If the directory that holds this file is placed first in the PYTHONPATH when
+using "coverage" to run Python's tests, then this file will become the very
+first module imported by the internals of Python 3. It installs a
+coverage-compatible trace function that can watch Standard Library modules
+execute from the very earliest stages of Python's own boot process. This fixes
+a problem with coverage - that it starts too late to trace the coverage of many
+of the most fundamental modules in the Standard Library.
+
+"""
+
+import sys
+
+class FullCoverageTracer(object):
+ def __init__(self):
+ # `traces` is a list of trace events. Frames are tricky: the same
+ # frame object is used for a whole scope, with new line numbers
+ # written into it. So in one scope, all the frame objects are the
+ # same object, and will eventually all will point to the last line
+ # executed. So we keep the line numbers alongside the frames.
+ # The list looks like:
+ #
+ # traces = [
+ # ((frame, event, arg), lineno), ...
+ # ]
+ #
+ self.traces = []
+
+ def fullcoverage_trace(self, *args):
+ frame, event, arg = args
+ self.traces.append((args, frame.f_lineno))
+ return self.fullcoverage_trace
+
+sys.settrace(FullCoverageTracer().fullcoverage_trace)
+
+# In coverage/files.py is actual_filename(), which uses glob.glob. I don't
+# understand why, but that use of glob borks everything if fullcoverage is in
+# effect. So here we make an ugly hail-mary pass to switch off glob.glob over
+# there. This means when using fullcoverage, Windows path names will not be
+# their actual case.
+
+#sys.fullcoverage = True
+
+# Finally, remove our own directory from sys.path; remove ourselves from
+# sys.modules; and re-import "encodings", which will be the real package
+# this time. Note that the delete from sys.modules dictionary has to
+# happen last, since all of the symbols in this module will become None
+# at that exact moment, including "sys".
+
+parentdir = max(filter(__file__.startswith, sys.path), key=len)
+sys.path.remove(parentdir)
+del sys.modules['encodings']
+import encodings
diff --git a/python/helpers/coverage/html.py b/python/helpers/coverage/html.py
index fffd9b45b41a..5242236c1ed9 100644
--- a/python/helpers/coverage/html.py
+++ b/python/helpers/coverage/html.py
@@ -1,21 +1,45 @@
"""HTML reporting for Coverage."""
-import os, re, shutil
+import os, re, shutil, sys
import coverage
from coverage.backward import pickle
from coverage.misc import CoverageException, Hasher
-from coverage.phystokens import source_token_lines
+from coverage.phystokens import source_token_lines, source_encoding
from coverage.report import Reporter
+from coverage.results import Numbers
from coverage.templite import Templite
-# Disable pylint msg W0612, because a bunch of variables look unused, but
-# they're accessed in a Templite context via locals().
-# pylint: disable=W0612
-def data_filename(fname):
- """Return the path to a data file of ours."""
- return os.path.join(os.path.split(__file__)[0], fname)
+# Static files are looked for in a list of places.
+STATIC_PATH = [
+ # The place Debian puts system Javascript libraries.
+ "/usr/share/javascript",
+
+ # Our htmlfiles directory.
+ os.path.join(os.path.dirname(__file__), "htmlfiles"),
+]
+
+def data_filename(fname, pkgdir=""):
+ """Return the path to a data file of ours.
+
+ The file is searched for on `STATIC_PATH`, and the first place it's
+ found is returned.
+
+ Each directory in `STATIC_PATH` is searched as-is, and also, if `pkgdir`
+ is provided, at that subdirectory.
+
+ """
+ for static_dir in STATIC_PATH:
+ static_filename = os.path.join(static_dir, fname)
+ if os.path.exists(static_filename):
+ return static_filename
+ if pkgdir:
+ static_filename = os.path.join(static_dir, pkgdir, fname)
+ if os.path.exists(static_filename):
+ return static_filename
+ raise CoverageException("Couldn't find static file %r" % fname)
+
def data(fname):
"""Return the contents of a data file of ours."""
@@ -31,26 +55,27 @@ class HtmlReporter(Reporter):
# These files will be copied from the htmlfiles dir to the output dir.
STATIC_FILES = [
- "style.css",
- "jquery-1.4.3.min.js",
- "jquery.hotkeys.js",
- "jquery.isonscreen.js",
- "jquery.tablesorter.min.js",
- "coverage_html.js",
- "keybd_closed.png",
- "keybd_open.png",
+ ("style.css", ""),
+ ("jquery.min.js", "jquery"),
+ ("jquery.hotkeys.js", "jquery-hotkeys"),
+ ("jquery.isonscreen.js", "jquery-isonscreen"),
+ ("jquery.tablesorter.min.js", "jquery-tablesorter"),
+ ("coverage_html.js", ""),
+ ("keybd_closed.png", ""),
+ ("keybd_open.png", ""),
]
- def __init__(self, cov, ignore_errors=False):
- super(HtmlReporter, self).__init__(cov, ignore_errors)
+ def __init__(self, cov, config):
+ super(HtmlReporter, self).__init__(cov, config)
self.directory = None
self.template_globals = {
'escape': escape,
+ 'title': self.config.html_title,
'__url__': coverage.__url__,
'__version__': coverage.__version__,
}
self.source_tmpl = Templite(
- data("htmlfiles/pyfile.html"), self.template_globals
+ data("pyfile.html"), self.template_globals
)
self.coverage = cov
@@ -58,29 +83,34 @@ class HtmlReporter(Reporter):
self.files = []
self.arcs = self.coverage.data.has_arcs()
self.status = HtmlStatus()
+ self.extra_css = None
+ self.totals = Numbers()
- def report(self, morfs, config=None):
+ def report(self, morfs):
"""Generate an HTML report for `morfs`.
- `morfs` is a list of modules or filenames. `config` is a
- CoverageConfig instance.
+ `morfs` is a list of modules or filenames.
"""
- assert config.html_dir, "must provide a directory for html reporting"
+ assert self.config.html_dir, "must give a directory for html reporting"
# Read the status data.
- self.status.read(config.html_dir)
+ self.status.read(self.config.html_dir)
# Check that this run used the same settings as the last run.
m = Hasher()
- m.update(config)
+ m.update(self.config)
these_settings = m.digest()
if self.status.settings_hash() != these_settings:
self.status.reset()
self.status.set_settings_hash(these_settings)
+ # The user may have extra CSS they want copied.
+ if self.config.extra_css:
+ self.extra_css = os.path.basename(self.config.extra_css)
+
# Process all the files.
- self.report_files(self.html_file, morfs, config, config.html_dir)
+ self.report_files(self.html_file, morfs, self.config.html_dir)
if not self.files:
raise CoverageException("No data to report.")
@@ -88,13 +118,34 @@ class HtmlReporter(Reporter):
# Write the index file.
self.index_file()
- # Create the once-per-directory files.
- for static in self.STATIC_FILES:
+ self.make_local_static_report_files()
+
+ return self.totals.pc_covered
+
+ def make_local_static_report_files(self):
+ """Make local instances of static files for HTML report."""
+ # The files we provide must always be copied.
+ for static, pkgdir in self.STATIC_FILES:
shutil.copyfile(
- data_filename("htmlfiles/" + static),
+ data_filename(static, pkgdir),
os.path.join(self.directory, static)
)
+ # The user may have extra CSS they want copied.
+ if self.extra_css:
+ shutil.copyfile(
+ self.config.extra_css,
+ os.path.join(self.directory, self.extra_css)
+ )
+
+ def write_html(self, fname, html):
+ """Write `html` to `fname`, properly encoded."""
+ fout = open(fname, "wb")
+ try:
+ fout.write(html.encode('ascii', 'xmlcharrefreplace'))
+ finally:
+ fout.close()
+
def file_hash(self, source, cu):
"""Compute a hash that changes if the file needs to be re-reported."""
m = Hasher()
@@ -121,11 +172,20 @@ class HtmlReporter(Reporter):
self.status.set_file_hash(flat_rootname, this_hash)
+ # If need be, determine the encoding of the source file. We use it
+ # later to properly write the HTML.
+ if sys.version_info < (3, 0):
+ encoding = source_encoding(source)
+ # Some UTF8 files have the dreaded UTF8 BOM. If so, junk it.
+ if encoding.startswith("utf-8") and source[:3] == "\xef\xbb\xbf":
+ source = source[3:]
+ encoding = "utf-8"
+
+ # Get the numbers for this file.
nums = analysis.numbers
- missing_branch_arcs = analysis.missing_branch_arcs()
- n_par = 0 # accumulated below.
- arcs = self.arcs
+ if self.arcs:
+ missing_branch_arcs = analysis.missing_branch_arcs()
# These classes determine which lines are highlighted by default.
c_run = "run hide_run"
@@ -149,7 +209,6 @@ class HtmlReporter(Reporter):
line_class.append(c_mis)
elif self.arcs and lineno in missing_branch_arcs:
line_class.append(c_par)
- n_par += 1
annlines = []
for b in missing_branch_arcs[lineno]:
if b < 0:
@@ -184,19 +243,22 @@ class HtmlReporter(Reporter):
})
# Write the HTML page for this file.
+ html = spaceless(self.source_tmpl.render({
+ 'c_exc': c_exc, 'c_mis': c_mis, 'c_par': c_par, 'c_run': c_run,
+ 'arcs': self.arcs, 'extra_css': self.extra_css,
+ 'cu': cu, 'nums': nums, 'lines': lines,
+ }))
+
+ if sys.version_info < (3, 0):
+ html = html.decode(encoding)
+
html_filename = flat_rootname + ".html"
html_path = os.path.join(self.directory, html_filename)
- html = spaceless(self.source_tmpl.render(locals()))
- fhtml = open(html_path, 'w')
- try:
- fhtml.write(html)
- finally:
- fhtml.close()
+ self.write_html(html_path, html)
# Save this file's information for the index file.
index_info = {
'nums': nums,
- 'par': n_par,
'html_filename': html_filename,
'name': cu.name,
}
@@ -206,19 +268,24 @@ class HtmlReporter(Reporter):
def index_file(self):
"""Write the index.html file for this report."""
index_tmpl = Templite(
- data("htmlfiles/index.html"), self.template_globals
+ data("index.html"), self.template_globals
)
- files = self.files
- arcs = self.arcs
-
- totals = sum([f['nums'] for f in files])
+ self.totals = sum([f['nums'] for f in self.files])
- fhtml = open(os.path.join(self.directory, "index.html"), "w")
- try:
- fhtml.write(index_tmpl.render(locals()))
- finally:
- fhtml.close()
+ html = index_tmpl.render({
+ 'arcs': self.arcs,
+ 'extra_css': self.extra_css,
+ 'files': self.files,
+ 'totals': self.totals,
+ })
+
+ if sys.version_info < (3, 0):
+ html = html.decode("utf-8")
+ self.write_html(
+ os.path.join(self.directory, "index.html"),
+ html
+ )
# Write the latest hashes for next time.
self.status.write(self.directory)
@@ -243,8 +310,12 @@ class HtmlStatus(object):
usable = False
try:
status_file = os.path.join(directory, self.STATUS_FILE)
- status = pickle.load(open(status_file, "rb"))
- except IOError:
+ fstatus = open(status_file, "rb")
+ try:
+ status = pickle.load(fstatus)
+ finally:
+ fstatus.close()
+ except (IOError, ValueError):
usable = False
else:
usable = True
@@ -321,5 +392,5 @@ def spaceless(html):
Get rid of some.
"""
- html = re.sub(">\s+<p ", ">\n<p ", html)
+ html = re.sub(r">\s+<p ", ">\n<p ", html)
return html
diff --git a/python/helpers/coverage/htmlfiles/coverage_html.js b/python/helpers/coverage/htmlfiles/coverage_html.js
index da3e22c812c1..b24006d25e00 100644
--- a/python/helpers/coverage/htmlfiles/coverage_html.js
+++ b/python/helpers/coverage/htmlfiles/coverage_html.js
@@ -122,6 +122,11 @@ coverage.pyfile_ready = function ($) {
.bind('keydown', '1', coverage.to_first_chunk)
;
+ $(".button_toggle_run").click(function (evt) {coverage.toggle_lines(evt.target, "run");});
+ $(".button_toggle_exc").click(function (evt) {coverage.toggle_lines(evt.target, "exc");});
+ $(".button_toggle_mis").click(function (evt) {coverage.toggle_lines(evt.target, "mis");});
+ $(".button_toggle_par").click(function (evt) {coverage.toggle_lines(evt.target, "par");});
+
coverage.assign_shortkeys();
coverage.wire_up_help_panel();
};
@@ -369,4 +374,3 @@ coverage.scroll_window = function (to_pos) {
coverage.finish_scrolling = function () {
$("html,body").stop(true, true);
};
-
diff --git a/python/helpers/coverage/htmlfiles/index.html b/python/helpers/coverage/htmlfiles/index.html
index 04b314a3427c..c831823dd239 100644
--- a/python/helpers/coverage/htmlfiles/index.html
+++ b/python/helpers/coverage/htmlfiles/index.html
@@ -2,9 +2,12 @@
<html>
<head>
<meta http-equiv='Content-Type' content='text/html; charset=utf-8'>
- <title>Coverage report</title>
+ <title>{{ title|escape }}</title>
<link rel='stylesheet' href='style.css' type='text/css'>
- <script type='text/javascript' src='jquery-1.4.3.min.js'></script>
+ {% if extra_css %}
+ <link rel='stylesheet' href='{{ extra_css }}' type='text/css'>
+ {% endif %}
+ <script type='text/javascript' src='jquery.min.js'></script>
<script type='text/javascript' src='jquery.tablesorter.min.js'></script>
<script type='text/javascript' src='jquery.hotkeys.js'></script>
<script type='text/javascript' src='coverage_html.js'></script>
@@ -16,7 +19,7 @@
<div id='header'>
<div class='content'>
- <h1>Coverage report:
+ <h1>{{ title|escape }}:
<span class='pc_cov'>{{totals.pc_covered_str}}%</span>
</h1>
<img id='keyboard_icon' src='keybd_closed.png'>
@@ -66,7 +69,7 @@
<td>{{totals.n_excluded}}</td>
{% if arcs %}
<td>{{totals.n_branches}}</td>
- <td>{{totals.n_missing_branches}}</td>
+ <td>{{totals.n_partial_branches}}</td>
{% endif %}
<td class='right'>{{totals.pc_covered_str}}%</td>
</tr>
@@ -80,7 +83,7 @@
<td>{{file.nums.n_excluded}}</td>
{% if arcs %}
<td>{{file.nums.n_branches}}</td>
- <td>{{file.nums.n_missing_branches}}</td>
+ <td>{{file.nums.n_partial_branches}}</td>
{% endif %}
<td class='right'>{{file.nums.pc_covered_str}}%</td>
</tr>
diff --git a/python/helpers/coverage/htmlfiles/jquery-1.4.3.min.js b/python/helpers/coverage/htmlfiles/jquery.min.js
index c941a5f7a9f3..c941a5f7a9f3 100644
--- a/python/helpers/coverage/htmlfiles/jquery-1.4.3.min.js
+++ b/python/helpers/coverage/htmlfiles/jquery.min.js
diff --git a/python/helpers/coverage/htmlfiles/keybd_closed.png b/python/helpers/coverage/htmlfiles/keybd_closed.png
index 6843abf0998b..f2b0418d2a33 100644..100755
--- a/python/helpers/coverage/htmlfiles/keybd_closed.png
+++ b/python/helpers/coverage/htmlfiles/keybd_closed.png
Binary files differ
diff --git a/python/helpers/coverage/htmlfiles/keybd_open.png b/python/helpers/coverage/htmlfiles/keybd_open.png
index 5a681ea70582..a77961db5424 100644..100755
--- a/python/helpers/coverage/htmlfiles/keybd_open.png
+++ b/python/helpers/coverage/htmlfiles/keybd_open.png
Binary files differ
diff --git a/python/helpers/coverage/htmlfiles/pyfile.html b/python/helpers/coverage/htmlfiles/pyfile.html
index ee0a3b1bbc8b..88c158dd41de 100644
--- a/python/helpers/coverage/htmlfiles/pyfile.html
+++ b/python/helpers/coverage/htmlfiles/pyfile.html
@@ -7,7 +7,10 @@
<meta http-equiv='X-UA-Compatible' content='IE=emulateIE7' />
<title>Coverage for {{cu.name|escape}}: {{nums.pc_covered_str}}%</title>
<link rel='stylesheet' href='style.css' type='text/css'>
- <script type='text/javascript' src='jquery-1.4.3.min.js'></script>
+ {% if extra_css %}
+ <link rel='stylesheet' href='{{ extra_css }}' type='text/css'>
+ {% endif %}
+ <script type='text/javascript' src='jquery.min.js'></script>
<script type='text/javascript' src='jquery.hotkeys.js'></script>
<script type='text/javascript' src='jquery.isonscreen.js'></script>
<script type='text/javascript' src='coverage_html.js'></script>
@@ -24,12 +27,12 @@
</h1>
<img id='keyboard_icon' src='keybd_closed.png'>
<h2 class='stats'>
- {{nums.n_statements}} statements
- <span class='{{c_run}} shortkey_r' onclick='coverage.toggle_lines(this, "run")'>{{nums.n_executed}} run</span>
- <span class='{{c_mis}} shortkey_m' onclick='coverage.toggle_lines(this, "mis")'>{{nums.n_missing}} missing</span>
- <span class='{{c_exc}} shortkey_x' onclick='coverage.toggle_lines(this, "exc")'>{{nums.n_excluded}} excluded</span>
+ {{nums.n_statements}} statements &nbsp;
+ <span class='{{c_run}} shortkey_r button_toggle_run'>{{nums.n_executed}} run</span>
+ <span class='{{c_mis}} shortkey_m button_toggle_mis'>{{nums.n_missing}} missing</span>
+ <span class='{{c_exc}} shortkey_x button_toggle_exc'>{{nums.n_excluded}} excluded</span>
{% if arcs %}
- <span class='{{c_par}} shortkey_p' onclick='coverage.toggle_lines(this, "par")'>{{n_par}} partial</span>
+ <span class='{{c_par}} shortkey_p button_toggle_par'>{{nums.n_partial_branches}} partial</span>
{% endif %}
</h2>
</div>
diff --git a/python/helpers/coverage/htmlfiles/style.css b/python/helpers/coverage/htmlfiles/style.css
index c40357b8b482..811c64019c06 100644
--- a/python/helpers/coverage/htmlfiles/style.css
+++ b/python/helpers/coverage/htmlfiles/style.css
@@ -24,8 +24,8 @@ html>body {
/* Set base font size to 12/16 */
p {
- font-size: .75em; /* 12/16 */
- line-height: 1.3333em; /* 16/12 */
+ font-size: .75em; /* 12/16 */
+ line-height: 1.33333333em; /* 16/12 */
}
table {
@@ -102,6 +102,31 @@ h2.stats {
border-color: #999 #ccc #ccc #999;
}
+.stats span.run {
+ background: #ddffdd;
+}
+.stats span.exc {
+ background: #eeeeee;
+}
+.stats span.mis {
+ background: #ffdddd;
+}
+.stats span.hide_run {
+ background: #eeffee;
+}
+.stats span.hide_exc {
+ background: #f5f5f5;
+}
+.stats span.hide_mis {
+ background: #ffeeee;
+}
+.stats span.par {
+ background: #ffffaa;
+}
+.stats span.hide_par {
+ background: #ffffcc;
+}
+
/* Help panel */
#keyboard_icon {
float: right;
diff --git a/python/helpers/coverage/misc.py b/python/helpers/coverage/misc.py
index fd9be8572a5b..0378173fcc3d 100644
--- a/python/helpers/coverage/misc.py
+++ b/python/helpers/coverage/misc.py
@@ -1,6 +1,10 @@
"""Miscellaneous stuff for Coverage."""
+import errno
import inspect
+import os
+import sys
+
from coverage.backward import md5, sorted # pylint: disable=W0622
from coverage.backward import string_class, to_bytes
@@ -34,6 +38,8 @@ def format_lines(statements, lines):
i = 0
j = 0
start = None
+ statements = sorted(statements)
+ lines = sorted(lines)
while i < len(statements) and j < len(lines):
if statements[i] == lines[j]:
if start == None:
@@ -50,6 +56,12 @@ def format_lines(statements, lines):
return ret
+def short_stack():
+ """Return a string summarizing the call stack."""
+ stack = inspect.stack()[:0:-1]
+ return "\n".join(["%30s : %s @%d" % (t[3],t[1],t[2]) for t in stack])
+
+
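A sketch of the output shape, one "function : filename @lineno" row per frame,
oldest call first:

    from coverage.misc import short_stack

    def inner():
        print(short_stack())

    def outer():
        inner()

    outer()
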
def expensive(fn):
"""A decorator to cache the result of an expensive operation.
@@ -76,13 +88,23 @@ def bool_or_none(b):
def join_regex(regexes):
"""Combine a list of regexes into one that matches any of them."""
if len(regexes) > 1:
- return "(" + ")|(".join(regexes) + ")"
+ return "|".join(["(%s)" % r for r in regexes])
elif regexes:
return regexes[0]
else:
return ""
+def file_be_gone(path):
+ """Remove a file, and don't get annoyed if it doesn't exist."""
+ try:
+ os.remove(path)
+ except OSError:
+ _, e, _ = sys.exc_info()
+ if e.errno != errno.ENOENT:
+ raise
+
+
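Removing a possibly-missing file becomes a one-liner; errors other than ENOENT
(a permissions problem, say) still propagate. A sketch with an example name:

    from coverage.misc import file_be_gone

    file_be_gone(".coverage.old")   # fine whether or not the file exists
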
class Hasher(object):
"""Hashes Python data into md5."""
def __init__(self):
@@ -93,8 +115,10 @@ class Hasher(object):
self.md5.update(to_bytes(str(type(v))))
if isinstance(v, string_class):
self.md5.update(to_bytes(v))
+ elif v is None:
+ pass
elif isinstance(v, (int, float)):
- self.update(str(v))
+ self.md5.update(to_bytes(str(v)))
elif isinstance(v, (tuple, list)):
for e in v:
self.update(e)
@@ -126,6 +150,10 @@ class NoSource(CoverageException):
"""We couldn't find the source for a module."""
pass
+class NoCode(NoSource):
+ """We couldn't find any code at all."""
+ pass
+
class NotPython(CoverageException):
"""A source file turned out not to be parsable Python."""
pass
diff --git a/python/helpers/coverage/parser.py b/python/helpers/coverage/parser.py
index cbbb5a6a2ec1..7a145a2a5346 100644
--- a/python/helpers/coverage/parser.py
+++ b/python/helpers/coverage/parser.py
@@ -1,9 +1,11 @@
"""Code parsing for Coverage."""
-import glob, opcode, os, re, sys, token, tokenize
+import dis, re, sys, token, tokenize
from coverage.backward import set, sorted, StringIO # pylint: disable=W0622
-from coverage.backward import open_source
+from coverage.backward import open_source, range # pylint: disable=W0622
+from coverage.backward import reversed # pylint: disable=W0622
+from coverage.backward import bytes_to_ints
from coverage.bytecode import ByteCodes, CodeObjects
from coverage.misc import nice_pair, expensive, join_regex
from coverage.misc import CoverageException, NoSource, NotPython
@@ -32,9 +34,13 @@ class CodeParser(object):
except IOError:
_, err, _ = sys.exc_info()
raise NoSource(
- "No source for code: %r: %s" % (self.filename, err)
+ "No source for code: '%s': %s" % (self.filename, err)
)
+ # Scrap the BOM if it exists.
+ if self.text and ord(self.text[0]) == 0xfeff:
+ self.text = self.text[1:]
+
self.exclude = exclude
self.show_tokens = False
@@ -102,9 +108,9 @@ class CodeParser(object):
first_line = None
empty = True
- tokgen = tokenize.generate_tokens(StringIO(self.text).readline)
+ tokgen = generate_tokens(self.text)
for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen:
- if self.show_tokens: # pragma: no cover
+ if self.show_tokens: # pragma: not covered
print("%10s %5s %-20r %r" % (
tokenize.tok_name.get(toktype, toktype),
nice_pair((slineno, elineno)), ttext, ltext
@@ -130,8 +136,7 @@ class CodeParser(object):
# (a trick from trace.py in the stdlib.) This works for
# 99.9999% of cases. For the rest (!) see:
# http://stackoverflow.com/questions/1769332/x/1769794#1769794
- for i in range(slineno, elineno+1):
- self.docstrings.add(i)
+ self.docstrings.update(range(slineno, elineno+1))
elif toktype == token.NEWLINE:
if first_line is not None and elineno != first_line:
# We're at the end of a line, and we've ended on a
@@ -170,16 +175,18 @@ class CodeParser(object):
first_line = line
return first_line
- def first_lines(self, lines, ignore=None):
+ def first_lines(self, lines, *ignores):
"""Map the line numbers in `lines` to the correct first line of the
statement.
- Skip any line mentioned in `ignore`.
+ Skip any line mentioned in any of the sequences in `ignores`.
- Returns a sorted list of the first lines.
+ Returns a set of the first lines.
"""
- ignore = ignore or []
+ ignore = set()
+ for ign in ignores:
+ ignore.update(ign)
lset = set()
for l in lines:
if l in ignore:
@@ -187,23 +194,34 @@ class CodeParser(object):
new_l = self.first_line(l)
if new_l not in ignore:
lset.add(new_l)
- return sorted(lset)
+ return lset
def parse_source(self):
"""Parse source text to find executable lines, excluded lines, etc.
- Return values are 1) a sorted list of executable line numbers, and
- 2) a sorted list of excluded line numbers.
+ Return values are 1) a set of executable line numbers, and 2) a set of
+ excluded line numbers.
Reported line numbers are normalized to the first line of multi-line
statements.
"""
- self._raw_parse()
+ try:
+ self._raw_parse()
+ except (tokenize.TokenError, IndentationError):
+ _, tokerr, _ = sys.exc_info()
+ msg, lineno = tokerr.args
+ raise NotPython(
+ "Couldn't parse '%s' as Python source: '%s' at %s" %
+ (self.filename, msg, lineno)
+ )
excluded_lines = self.first_lines(self.excluded)
- ignore = excluded_lines + list(self.docstrings)
- lines = self.first_lines(self.statement_starts, ignore)
+ lines = self.first_lines(
+ self.statement_starts,
+ excluded_lines,
+ self.docstrings
+ )
return lines, excluded_lines
@@ -258,8 +276,8 @@ class CodeParser(object):
## Opcodes that guide the ByteParser.
def _opcode(name):
- """Return the opcode by name from the opcode module."""
- return opcode.opmap[name]
+ """Return the opcode by name from the dis module."""
+ return dis.opmap[name]
def _opcode_set(*names):
"""Return a set of opcodes by the names in `names`."""
@@ -297,7 +315,7 @@ OPS_EXCEPT_BLOCKS = _opcode_set('SETUP_EXCEPT', 'SETUP_FINALLY')
OPS_POP_BLOCK = _opcode_set('POP_BLOCK')
# Opcodes that have a jump destination, but aren't really a jump.
-OPS_NO_JUMP = _opcode_set('SETUP_EXCEPT', 'SETUP_FINALLY')
+OPS_NO_JUMP = OPS_PUSH_BLOCK
# Individual opcodes we need below.
OP_BREAK_LOOP = _opcode('BREAK_LOOP')
@@ -314,6 +332,7 @@ class ByteParser(object):
def __init__(self, code=None, text=None, filename=None):
if code:
self.code = code
+ self.text = text
else:
if not text:
assert filename, "If no code or text, need a filename"
@@ -322,6 +341,7 @@ class ByteParser(object):
text = sourcef.read()
finally:
sourcef.close()
+ self.text = text
try:
# Python 2.3 and 2.4 don't like partial last lines, so be sure
@@ -350,69 +370,54 @@ class ByteParser(object):
The iteration includes `self` as its first value.
"""
- return map(lambda c: ByteParser(code=c), CodeObjects(self.code))
-
- # Getting numbers from the lnotab value changed in Py3.0.
- if sys.version_info >= (3, 0):
- def _lnotab_increments(self, lnotab):
- """Return a list of ints from the lnotab bytes in 3.x"""
- return list(lnotab)
- else:
- def _lnotab_increments(self, lnotab):
- """Return a list of ints from the lnotab string in 2.x"""
- return [ord(c) for c in lnotab]
+ children = CodeObjects(self.code)
+ return [ByteParser(code=c, text=self.text) for c in children]
def _bytes_lines(self):
"""Map byte offsets to line numbers in `code`.
Uses co_lnotab described in Python/compile.c to map byte offsets to
- line numbers. Returns a list: [(b0, l0), (b1, l1), ...]
+ line numbers. Produces a sequence: (b0, l0), (b1, l1), ...
+
+ Only byte offsets that correspond to line numbers are included in the
+ results.
"""
# Adapted from dis.py in the standard library.
- byte_increments = self._lnotab_increments(self.code.co_lnotab[0::2])
- line_increments = self._lnotab_increments(self.code.co_lnotab[1::2])
+ byte_increments = bytes_to_ints(self.code.co_lnotab[0::2])
+ line_increments = bytes_to_ints(self.code.co_lnotab[1::2])
- bytes_lines = []
last_line_num = None
line_num = self.code.co_firstlineno
byte_num = 0
for byte_incr, line_incr in zip(byte_increments, line_increments):
if byte_incr:
if line_num != last_line_num:
- bytes_lines.append((byte_num, line_num))
+ yield (byte_num, line_num)
last_line_num = line_num
byte_num += byte_incr
line_num += line_incr
if line_num != last_line_num:
- bytes_lines.append((byte_num, line_num))
- return bytes_lines
+ yield (byte_num, line_num)
def _find_statements(self):
"""Find the statements in `self.code`.
- Return a set of line numbers that start statements. Recurses into all
- code objects reachable from `self.code`.
+ Produce a sequence of line numbers that start statements. Recurses
+ into all code objects reachable from `self.code`.
"""
- stmts = set()
for bp in self.child_parsers():
# Get all of the lineno information from this code.
for _, l in bp._bytes_lines():
- stmts.add(l)
- return stmts
-
- def _disassemble(self): # pragma: no cover
- """Disassemble code, for ad-hoc experimenting."""
-
- import dis
-
- for bp in self.child_parsers():
- print("\n%s: " % bp.code)
- dis.dis(bp.code)
- print("Bytes lines: %r" % bp._bytes_lines())
+ yield l
- print("")
+ def _block_stack_repr(self, block_stack):
+ """Get a string version of `block_stack`, for debugging."""
+ blocks = ", ".join(
+ ["(%s, %r)" % (dis.opname[b[0]], b[1]) for b in block_stack]
+ )
+ return "[" + blocks + "]"
def _split_into_chunks(self):
"""Split the code object into a list of `Chunk` objects.
@@ -423,10 +428,11 @@ class ByteParser(object):
Returns a list of `Chunk` objects.
"""
-
# The list of chunks so far, and the one we're working on.
chunks = []
chunk = None
+
+ # A dict mapping byte offsets of line starts to the line numbers.
bytes_lines_map = dict(self._bytes_lines())
# The block stack: loops and try blocks get pushed here for the
@@ -441,24 +447,38 @@ class ByteParser(object):
# We have to handle the last two bytecodes specially.
ult = penult = None
- for bc in ByteCodes(self.code.co_code):
+ # Get a set of all of the jump-to points.
+ jump_to = set()
+ bytecodes = list(ByteCodes(self.code.co_code))
+ for bc in bytecodes:
+ if bc.jump_to >= 0:
+ jump_to.add(bc.jump_to)
+
+ chunk_lineno = 0
+
+ # Walk the byte codes building chunks.
+ for bc in bytecodes:
# Maybe have to start a new chunk
+ start_new_chunk = False
+ first_chunk = False
if bc.offset in bytes_lines_map:
# Start a new chunk for each source line number.
- if chunk:
- chunk.exits.add(bc.offset)
- chunk = Chunk(bc.offset, bytes_lines_map[bc.offset])
- chunks.append(chunk)
+ start_new_chunk = True
+ chunk_lineno = bytes_lines_map[bc.offset]
+ first_chunk = True
+ elif bc.offset in jump_to:
+ # To make chunks have a single entrance, we have to make a new
+ # chunk when we get to a place some bytecode jumps to.
+ start_new_chunk = True
elif bc.op in OPS_CHUNK_BEGIN:
# Jumps deserve their own unnumbered chunk. This fixes
# problems with jumps to jumps getting confused.
+ start_new_chunk = True
+
+ if not chunk or start_new_chunk:
if chunk:
chunk.exits.add(bc.offset)
- chunk = Chunk(bc.offset)
- chunks.append(chunk)
-
- if not chunk:
- chunk = Chunk(bc.offset)
+ chunk = Chunk(bc.offset, chunk_lineno, first_chunk)
chunks.append(chunk)
# Look at the opcode
@@ -487,15 +507,11 @@ class ByteParser(object):
chunk.exits.add(block_stack[-1][1])
chunk = None
if bc.op == OP_END_FINALLY:
- if block_stack:
- # A break that goes through a finally will jump to whatever
- # block is on top of the stack.
- chunk.exits.add(block_stack[-1][1])
# For the finally clause we need to find the closest exception
# block, and use its jump target as an exit.
- for iblock in range(len(block_stack)-1, -1, -1):
- if block_stack[iblock][0] in OPS_EXCEPT_BLOCKS:
- chunk.exits.add(block_stack[iblock][1])
+ for block in reversed(block_stack):
+ if block[0] in OPS_EXCEPT_BLOCKS:
+ chunk.exits.add(block[1])
break
if bc.op == OP_COMPARE_OP and bc.arg == COMPARE_EXCEPTION:
# This is an except clause. We want to overlook the next
@@ -521,23 +537,33 @@ class ByteParser(object):
last_chunk = chunks[-1]
last_chunk.exits.remove(ex)
last_chunk.exits.add(penult.offset)
- chunk = Chunk(penult.offset)
+ chunk = Chunk(
+ penult.offset, last_chunk.line, False
+ )
chunk.exits.add(ex)
chunks.append(chunk)
# Give all the chunks a length.
- chunks[-1].length = bc.next_offset - chunks[-1].byte
+ chunks[-1].length = bc.next_offset - chunks[-1].byte # pylint: disable=W0631,C0301
for i in range(len(chunks)-1):
chunks[i].length = chunks[i+1].byte - chunks[i].byte
+ #self.validate_chunks(chunks)
return chunks
+ def validate_chunks(self, chunks):
+ """Validate the rule that chunks have a single entrance."""
+ # starts is the entrances to the chunks
+ starts = set([ch.byte for ch in chunks])
+ for ch in chunks:
+ assert all([(ex in starts or ex < 0) for ex in ch.exits])
+
def _arcs(self):
"""Find the executable arcs in the code.
- Returns a set of pairs, (from,to). From and to are integer line
- numbers. If from is < 0, then the arc is an entrance into the code
- object. If to is < 0, the arc is an exit from the code object.
+ Yields pairs: (from,to). From and to are integer line numbers. If
+ from is < 0, then the arc is an entrance into the code object. If to
+ is < 0, the arc is an exit from the code object.
"""
chunks = self._split_into_chunks()
@@ -545,65 +571,43 @@ class ByteParser(object):
# A map from byte offsets to chunks jumped into.
byte_chunks = dict([(c.byte, c) for c in chunks])
- # Build a map from byte offsets to actual lines reached.
- byte_lines = {}
- bytes_to_add = set([c.byte for c in chunks])
+ # There's always an entrance at the first chunk.
+ yield (-1, byte_chunks[0].line)
- while bytes_to_add:
- byte_to_add = bytes_to_add.pop()
- if byte_to_add in byte_lines or byte_to_add < 0:
+ # Traverse from the first chunk in each line, and yield arcs where
+ # the trace function will be invoked.
+ for chunk in chunks:
+ if not chunk.first:
continue
- # Which lines does this chunk lead to?
- bytes_considered = set()
- bytes_to_consider = [byte_to_add]
- lines = set()
-
- while bytes_to_consider:
- byte = bytes_to_consider.pop()
- bytes_considered.add(byte)
-
- # Find chunk for byte
- try:
- ch = byte_chunks[byte]
- except KeyError:
- for ch in chunks:
- if ch.byte <= byte < ch.byte+ch.length:
- break
- else:
- # No chunk for this byte!
- raise Exception("Couldn't find chunk @ %d" % byte)
- byte_chunks[byte] = ch
-
- if ch.line:
- lines.add(ch.line)
- else:
- for ex in ch.exits:
- if ex < 0:
- lines.add(ex)
- elif ex not in bytes_considered:
- bytes_to_consider.append(ex)
-
- bytes_to_add.update(ch.exits)
-
- byte_lines[byte_to_add] = lines
-
- # Figure out for each chunk where the exits go.
- arcs = set()
- for chunk in chunks:
- if chunk.line:
- for ex in chunk.exits:
+ chunks_considered = set()
+ chunks_to_consider = [chunk]
+ while chunks_to_consider:
+ # Get the chunk we're considering, and make sure we don't
+ # consider it again
+ this_chunk = chunks_to_consider.pop()
+ chunks_considered.add(this_chunk)
+
+ # For each exit, add the line number if the trace function
+ # would be triggered, or add the chunk to those being
+ # considered if not.
+ for ex in this_chunk.exits:
if ex < 0:
- exit_lines = [ex]
+ yield (chunk.line, ex)
else:
- exit_lines = byte_lines[ex]
- for exit_line in exit_lines:
- if chunk.line != exit_line:
- arcs.add((chunk.line, exit_line))
- for line in byte_lines[0]:
- arcs.add((-1, line))
-
- return arcs
+ next_chunk = byte_chunks[ex]
+ if next_chunk in chunks_considered:
+ continue
+
+ # The trace function is invoked if visiting the first
+ # bytecode in a line, or if the transition is a
+ # backward jump.
+ backward_jump = next_chunk.byte < this_chunk.byte
+ if next_chunk.first or backward_jump:
+ if next_chunk.line != chunk.line:
+ yield (chunk.line, next_chunk.line)
+ else:
+ chunks_to_consider.append(next_chunk)
def _all_chunks(self):
"""Returns a list of `Chunk` objects for this code and its children.
@@ -631,11 +635,11 @@ class ByteParser(object):
class Chunk(object):
- """A sequence of bytecodes with a single entrance.
+ """A sequence of byte codes with a single entrance.
To analyze byte code, we have to divide it into chunks, sequences of byte
- codes such that each basic block has only one entrance, the first
- instruction in the block.
+ codes such that each chunk has only one entrance, the first instruction in
+ the chunk.
This is almost the CS concept of `basic block`_, except that we're willing
to have many exits from a chunk, and "basic block" is a more cumbersome
@@ -643,158 +647,54 @@ class Chunk(object):
.. _basic block: http://en.wikipedia.org/wiki/Basic_block
+ `line` is the source line number containing this chunk.
+
+ `first` is true if this is the first chunk in the source line.
+
An exit < 0 means the chunk can leave the code (return). The exit is
the negative of the starting line number of the code block.
"""
- def __init__(self, byte, line=0):
+ def __init__(self, byte, line, first):
self.byte = byte
self.line = line
+ self.first = first
self.length = 0
self.exits = set()
def __repr__(self):
- return "<%d+%d @%d %r>" % (
- self.byte, self.length, self.line, list(self.exits)
+ if self.first:
+ bang = "!"
+ else:
+ bang = ""
+ return "<%d+%d @%d%s %r>" % (
+ self.byte, self.length, self.line, bang, list(self.exits)
)
-class AdHocMain(object): # pragma: no cover
- """An ad-hoc main for code parsing experiments."""
+class CachedTokenizer(object):
+ """A one-element cache around tokenize.generate_tokens.
- def main(self, args):
- """A main function for trying the code from the command line."""
+ When reporting, coverage.py tokenizes files twice, once to find the
+ structure of the file, and once to syntax-color it. Tokenizing is
+ expensive, and easily cached.
- from optparse import OptionParser
+ This is a one-element cache so that our twice-in-a-row tokenizing doesn't
+ actually tokenize twice.
- parser = OptionParser()
- parser.add_option(
- "-c", action="store_true", dest="chunks",
- help="Show basic block chunks"
- )
- parser.add_option(
- "-d", action="store_true", dest="dis",
- help="Disassemble"
- )
- parser.add_option(
- "-R", action="store_true", dest="recursive",
- help="Recurse to find source files"
- )
- parser.add_option(
- "-s", action="store_true", dest="source",
- help="Show analyzed source"
- )
- parser.add_option(
- "-t", action="store_true", dest="tokens",
- help="Show tokens"
+ """
+ def __init__(self):
+ self.last_text = None
+ self.last_tokens = None
+
+ def generate_tokens(self, text):
+ """A stand-in for `tokenize.generate_tokens`."""
+ if text != self.last_text:
+ self.last_text = text
+ self.last_tokens = list(
+ tokenize.generate_tokens(StringIO(text).readline)
)
+ return self.last_tokens
- options, args = parser.parse_args()
- if options.recursive:
- if args:
- root = args[0]
- else:
- root = "."
- for root, _, _ in os.walk(root):
- for f in glob.glob(root + "/*.py"):
- self.adhoc_one_file(options, f)
- else:
- self.adhoc_one_file(options, args[0])
-
- def adhoc_one_file(self, options, filename):
- """Process just one file."""
-
- if options.dis or options.chunks:
- try:
- bp = ByteParser(filename=filename)
- except CoverageException:
- _, err, _ = sys.exc_info()
- print("%s" % (err,))
- return
-
- if options.dis:
- print("Main code:")
- bp._disassemble()
-
- if options.chunks:
- chunks = bp._all_chunks()
- if options.recursive:
- print("%6d: %s" % (len(chunks), filename))
- else:
- print("Chunks: %r" % chunks)
- arcs = bp._all_arcs()
- print("Arcs: %r" % sorted(arcs))
-
- if options.source or options.tokens:
- cp = CodeParser(filename=filename, exclude=r"no\s*cover")
- cp.show_tokens = options.tokens
- cp._raw_parse()
-
- if options.source:
- if options.chunks:
- arc_width, arc_chars = self.arc_ascii_art(arcs)
- else:
- arc_width, arc_chars = 0, {}
-
- exit_counts = cp.exit_counts()
-
- for i, ltext in enumerate(cp.lines):
- lineno = i+1
- m0 = m1 = m2 = m3 = a = ' '
- if lineno in cp.statement_starts:
- m0 = '-'
- exits = exit_counts.get(lineno, 0)
- if exits > 1:
- m1 = str(exits)
- if lineno in cp.docstrings:
- m2 = '"'
- if lineno in cp.classdefs:
- m2 = 'C'
- if lineno in cp.excluded:
- m3 = 'x'
- a = arc_chars.get(lineno, '').ljust(arc_width)
- print("%4d %s%s%s%s%s %s" %
- (lineno, m0, m1, m2, m3, a, ltext)
- )
-
- def arc_ascii_art(self, arcs):
- """Draw arcs as ascii art.
-
- Returns a width of characters needed to draw all the arcs, and a
- dictionary mapping line numbers to ascii strings to draw for that line.
-
- """
- arc_chars = {}
- for lfrom, lto in sorted(arcs):
- if lfrom < 0:
- arc_chars[lto] = arc_chars.get(lto, '') + 'v'
- elif lto < 0:
- arc_chars[lfrom] = arc_chars.get(lfrom, '') + '^'
- else:
- if lfrom == lto - 1:
- # Don't show obvious arcs.
- continue
- if lfrom < lto:
- l1, l2 = lfrom, lto
- else:
- l1, l2 = lto, lfrom
- w = max([len(arc_chars.get(l, '')) for l in range(l1, l2+1)])
- for l in range(l1, l2+1):
- if l == lfrom:
- ch = '<'
- elif l == lto:
- ch = '>'
- else:
- ch = '|'
- arc_chars[l] = arc_chars.get(l, '').ljust(w) + ch
- arc_width = 0
-
- if arc_chars:
- arc_width = max([len(a) for a in arc_chars.values()])
- else:
- arc_width = 0
-
- return arc_width, arc_chars
-
-if __name__ == '__main__':
- AdHocMain().main(sys.argv[1:])
+# Create our generate_tokens cache as a callable replacement function.
+generate_tokens = CachedTokenizer().generate_tokens
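As a quick usage note (nothing assumed beyond the class above): because the cache returns the same stored list on a repeated call, the second tokenization of identical text is free.

# Sketch: the second call with the same text hits the one-element cache.
source = "x = 1\n"
toks_first = generate_tokens(source)
toks_second = generate_tokens(source)
assert toks_first is toks_second  # same cached list, tokenized only once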
diff --git a/python/helpers/coverage/phystokens.py b/python/helpers/coverage/phystokens.py
index fc4f2c9057b1..99b1d5ba0c79 100644
--- a/python/helpers/coverage/phystokens.py
+++ b/python/helpers/coverage/phystokens.py
@@ -1,7 +1,9 @@
"""Better tokenizing for coverage.py."""
-import keyword, re, token, tokenize
-from coverage.backward import StringIO # pylint: disable=W0622
+import codecs, keyword, re, sys, token, tokenize
+from coverage.backward import set # pylint: disable=W0622
+from coverage.parser import generate_tokens
+
def phys_tokens(toks):
"""Return all physical tokens, even line continuations.
@@ -18,7 +20,7 @@ def phys_tokens(toks):
last_ttype = None
for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks:
if last_lineno != elineno:
- if last_line and last_line[-2:] == "\\\n":
+ if last_line and last_line.endswith("\\\n"):
# We are at the beginning of a new line, and the last line
# ended with a backslash. We probably have to inject a
# backslash token into the stream. Unfortunately, there's more
@@ -74,11 +76,11 @@ def source_token_lines(source):
is indistinguishable from a final line with a newline.
"""
- ws_tokens = [token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL]
+ ws_tokens = set([token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL])
line = []
col = 0
source = source.expandtabs(8).replace('\r\n', '\n')
- tokgen = tokenize.generate_tokens(StringIO(source).readline)
+ tokgen = generate_tokens(source)
for ttype, ttext, (_, scol), (_, ecol), _ in phys_tokens(tokgen):
mark_start = True
for part in re.split('(\n)', ttext):
@@ -106,3 +108,103 @@ def source_token_lines(source):
if line:
yield line
+
+def source_encoding(source):
+ """Determine the encoding for `source` (a string), according to PEP 263.
+
+ Returns a string, the name of the encoding.
+
+ """
+ # Note: this function should never be called on Python 3, since py3 has
+ # built-in tools to do this.
+ assert sys.version_info < (3, 0)
+
+ # This is mostly code adapted from Py3.2's tokenize module.
+
+ cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")
+
+ # Do this so the detect_encoding code we copied will work.
+ readline = iter(source.splitlines(True)).next
+
+ def _get_normal_name(orig_enc):
+ """Imitates get_normal_name in tokenizer.c."""
+ # Only care about the first 12 characters.
+ enc = orig_enc[:12].lower().replace("_", "-")
+ if re.match(r"^utf-8($|-)", enc):
+ return "utf-8"
+ if re.match(r"^(latin-1|iso-8859-1|iso-latin-1)($|-)", enc):
+ return "iso-8859-1"
+ return orig_enc
+
+ # From detect_encoding():
+ # It detects the encoding from the presence of a utf-8 bom or an encoding
+ # cookie as specified in pep-0263. If both a bom and a cookie are present,
+ # but disagree, a SyntaxError will be raised. If the encoding cookie is an
+ # invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
+ # 'utf-8-sig' is returned.
+
+ # If no encoding is specified, then the default will be returned. The
+ # default varied with version.
+
+ if sys.version_info <= (2, 4):
+ default = 'iso-8859-1'
+ else:
+ default = 'ascii'
+
+ bom_found = False
+ encoding = None
+
+ def read_or_stop():
+ """Get the next source line, or ''."""
+ try:
+ return readline()
+ except StopIteration:
+ return ''
+
+ def find_cookie(line):
+ """Find an encoding cookie in `line`."""
+ try:
+ line_string = line.decode('ascii')
+ except UnicodeDecodeError:
+ return None
+
+ matches = cookie_re.findall(line_string)
+ if not matches:
+ return None
+ encoding = _get_normal_name(matches[0])
+ try:
+ codec = codecs.lookup(encoding)
+ except LookupError:
+ # This behaviour mimics the Python interpreter
+ raise SyntaxError("unknown encoding: " + encoding)
+
+ if bom_found:
+ # codecs in 2.3 were raw tuples of functions, assume the best.
+ codec_name = getattr(codec, 'name', encoding)
+ if codec_name != 'utf-8':
+ # This behaviour mimics the Python interpreter
+ raise SyntaxError('encoding problem: utf-8')
+ encoding += '-sig'
+ return encoding
+
+ first = read_or_stop()
+ if first.startswith(codecs.BOM_UTF8):
+ bom_found = True
+ first = first[3:]
+ default = 'utf-8-sig'
+ if not first:
+ return default
+
+ encoding = find_cookie(first)
+ if encoding:
+ return encoding
+
+ second = read_or_stop()
+ if not second:
+ return default
+
+ encoding = find_cookie(second)
+ if encoding:
+ return encoding
+
+ return default
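For illustration, on Python 2 (per the assert above) a PEP 263 cookie on the first or second line is detected like this; the sample source strings are invented:

# Sketch of source_encoding in use (Python 2 only).
print(source_encoding("# -*- coding: iso-8859-1 -*-\nx = 1\n"))  # 'iso-8859-1'
print(source_encoding("x = 1\n"))  # the version-dependent default, e.g. 'ascii'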
diff --git a/python/helpers/coverage/report.py b/python/helpers/coverage/report.py
index 6c5510ad4677..34f44422298e 100644
--- a/python/helpers/coverage/report.py
+++ b/python/helpers/coverage/report.py
@@ -2,20 +2,21 @@
import fnmatch, os
from coverage.codeunit import code_unit_factory
+from coverage.files import prep_patterns
from coverage.misc import CoverageException, NoSource, NotPython
class Reporter(object):
"""A base class for all reporters."""
- def __init__(self, coverage, ignore_errors=False):
+ def __init__(self, coverage, config):
"""Create a reporter.
- `coverage` is the coverage instance. `ignore_errors` controls how
- skittish the reporter will be during file processing.
+ `coverage` is the coverage instance. `config` is an instance of
+ CoverageConfig, for controlling all sorts of behavior.
"""
self.coverage = coverage
- self.ignore_errors = ignore_errors
+ self.config = config
# The code units to report on. Set by find_code_units.
self.code_units = []
@@ -24,19 +25,18 @@ class Reporter(object):
# classes.
self.directory = None
- def find_code_units(self, morfs, config):
+ def find_code_units(self, morfs):
"""Find the code units we'll report on.
- `morfs` is a list of modules or filenames. `config` is a
- CoverageConfig instance.
+ `morfs` is a list of modules or filenames.
"""
morfs = morfs or self.coverage.data.measured_files()
file_locator = self.coverage.file_locator
self.code_units = code_unit_factory(morfs, file_locator)
- if config.include:
- patterns = [file_locator.abs_file(p) for p in config.include]
+ if self.config.include:
+ patterns = prep_patterns(self.config.include)
filtered = []
for cu in self.code_units:
for pattern in patterns:
@@ -45,8 +45,8 @@ class Reporter(object):
break
self.code_units = filtered
- if config.omit:
- patterns = [file_locator.abs_file(p) for p in config.omit]
+ if self.config.omit:
+ patterns = prep_patterns(self.config.omit)
filtered = []
for cu in self.code_units:
for pattern in patterns:
@@ -58,7 +58,7 @@ class Reporter(object):
self.code_units.sort()
- def report_files(self, report_fn, morfs, config, directory=None):
+ def report_files(self, report_fn, morfs, directory=None):
"""Run a reporting function on a number of morfs.
`report_fn` is called for each relative morf in `morfs`. It is called
@@ -69,10 +69,8 @@ class Reporter(object):
where `code_unit` is the `CodeUnit` for the morf, and `analysis` is
the `Analysis` for the morf.
- `config` is a CoverageConfig instance.
-
"""
- self.find_code_units(morfs, config)
+ self.find_code_units(morfs)
if not self.code_units:
raise CoverageException("No data to report.")
@@ -84,6 +82,11 @@ class Reporter(object):
for cu in self.code_units:
try:
report_fn(cu, self.coverage._analyze(cu))
- except (NoSource, NotPython):
- if not self.ignore_errors:
+ except NoSource:
+ if not self.config.ignore_errors:
+ raise
+ except NotPython:
+ # Only report errors for .py files, and only if we didn't
+ # explicitly suppress those errors.
+ if cu.should_be_python() and not self.config.ignore_errors:
raise
diff --git a/python/helpers/coverage/results.py b/python/helpers/coverage/results.py
index adfb8f42de5a..db6df0d30b7e 100644
--- a/python/helpers/coverage/results.py
+++ b/python/helpers/coverage/results.py
@@ -2,7 +2,7 @@
import os
-from coverage.backward import set, sorted # pylint: disable=W0622
+from coverage.backward import iitems, set, sorted # pylint: disable=W0622
from coverage.misc import format_lines, join_regex, NoSource
from coverage.parser import CodeParser
@@ -15,16 +15,10 @@ class Analysis(object):
self.code_unit = code_unit
self.filename = self.code_unit.filename
- ext = os.path.splitext(self.filename)[1]
- source = None
- if ext == '.py':
- if not os.path.exists(self.filename):
- source = self.coverage.file_locator.get_zip_data(self.filename)
- if not source:
- raise NoSource("No source for code: %r" % self.filename)
+ actual_filename, source = self.find_source(self.filename)
self.parser = CodeParser(
- text=source, filename=self.filename,
+ text=source, filename=actual_filename,
exclude=self.coverage._exclude_regex('exclude')
)
self.statements, self.excluded = self.parser.parse_source()
@@ -32,7 +26,7 @@ class Analysis(object):
# Identify missing statements.
executed = self.coverage.data.executed_lines(self.filename)
exec1 = self.parser.first_lines(executed)
- self.missing = sorted(set(self.statements) - set(exec1))
+ self.missing = self.statements - exec1
if self.coverage.data.has_arcs():
self.no_branch = self.parser.lines_matching(
@@ -41,9 +35,12 @@ class Analysis(object):
)
n_branches = self.total_branches()
mba = self.missing_branch_arcs()
- n_missing_branches = sum([len(v) for v in mba.values()])
+ n_partial_branches = sum(
+ [len(v) for k,v in iitems(mba) if k not in self.missing]
+ )
+ n_missing_branches = sum([len(v) for k,v in iitems(mba)])
else:
- n_branches = n_missing_branches = 0
+ n_branches = n_partial_branches = n_missing_branches = 0
self.no_branch = set()
self.numbers = Numbers(
@@ -52,9 +49,48 @@ class Analysis(object):
n_excluded=len(self.excluded),
n_missing=len(self.missing),
n_branches=n_branches,
+ n_partial_branches=n_partial_branches,
n_missing_branches=n_missing_branches,
)
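The difference between the two new counts can be seen with invented numbers: a branch line that never ran at all contributes to n_missing_branches but not to n_partial_branches.

# Sketch: missing branch arcs keyed by branch line, invented values.
mba = {10: [11, 14], 20: [21]}  # line 10 misses two exits, line 20 misses one
missing = set([20])             # line 20 itself never executed
n_partial = sum(len(v) for k, v in mba.items() if k not in missing)  # 2
n_missing = sum(len(v) for v in mba.values())                        # 3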
+ def find_source(self, filename):
+ """Find the source for `filename`.
+
+ Returns two values: the actual filename, and the source.
+
+ The source returned depends on which of these cases holds:
+
+ * The filename seems to be a non-source file: returns None
+
+ * The filename is a source file, and actually exists: returns None.
+
+ * The filename is a source file, and is in a zip file or egg:
+ returns the source.
+
+ * The filename is a source file, but couldn't be found: raises
+ `NoSource`.
+
+ """
+ source = None
+
+ base, ext = os.path.splitext(filename)
+ TRY_EXTS = {
+ '.py': ['.py', '.pyw'],
+ '.pyw': ['.pyw'],
+ }
+ try_exts = TRY_EXTS.get(ext)
+ if not try_exts:
+ return filename, None
+
+ for try_ext in try_exts:
+ try_filename = base + try_ext
+ if os.path.exists(try_filename):
+ return try_filename, None
+ source = self.coverage.file_locator.get_zip_data(try_filename)
+ if source:
+ return try_filename, source
+ raise NoSource("No source for code: '%s'" % filename)
+
def missing_formatted(self):
"""The missing line numbers, formatted nicely.
@@ -107,7 +143,7 @@ class Analysis(object):
def branch_lines(self):
"""Returns a list of line numbers that have more than one exit."""
exit_counts = self.parser.exit_counts()
- return [l1 for l1,count in exit_counts.items() if count > 1]
+ return [l1 for l1,count in iitems(exit_counts) if count > 1]
def total_branches(self):
"""How many total branches are there?"""
@@ -164,13 +200,14 @@ class Numbers(object):
_near100 = 99.0
def __init__(self, n_files=0, n_statements=0, n_excluded=0, n_missing=0,
- n_branches=0, n_missing_branches=0
+ n_branches=0, n_partial_branches=0, n_missing_branches=0
):
self.n_files = n_files
self.n_statements = n_statements
self.n_excluded = n_excluded
self.n_missing = n_missing
self.n_branches = n_branches
+ self.n_partial_branches = n_partial_branches
self.n_missing_branches = n_missing_branches
def set_precision(cls, precision):
@@ -234,8 +271,12 @@ class Numbers(object):
nums.n_excluded = self.n_excluded + other.n_excluded
nums.n_missing = self.n_missing + other.n_missing
nums.n_branches = self.n_branches + other.n_branches
- nums.n_missing_branches = (self.n_missing_branches +
- other.n_missing_branches)
+ nums.n_partial_branches = (
+ self.n_partial_branches + other.n_partial_branches
+ )
+ nums.n_missing_branches = (
+ self.n_missing_branches + other.n_missing_branches
+ )
return nums
def __radd__(self, other):
diff --git a/python/helpers/coverage/summary.py b/python/helpers/coverage/summary.py
index 599ae78221ff..c99c53034aac 100644
--- a/python/helpers/coverage/summary.py
+++ b/python/helpers/coverage/summary.py
@@ -4,24 +4,23 @@ import sys
from coverage.report import Reporter
from coverage.results import Numbers
+from coverage.misc import NotPython
class SummaryReporter(Reporter):
"""A reporter for writing the summary report."""
- def __init__(self, coverage, show_missing=True, ignore_errors=False):
- super(SummaryReporter, self).__init__(coverage, ignore_errors)
- self.show_missing = show_missing
+ def __init__(self, coverage, config):
+ super(SummaryReporter, self).__init__(coverage, config)
self.branches = coverage.data.has_arcs()
- def report(self, morfs, outfile=None, config=None):
+ def report(self, morfs, outfile=None):
"""Writes a report summarizing coverage statistics per module.
- `outfile` is a file object to write the summary to. `config` is a
- CoverageConfig instance.
+ `outfile` is a file object to write the summary to.
"""
- self.find_code_units(morfs, config)
+ self.find_code_units(morfs)
# Prepare the formatting strings
max_name = max([len(cu.name) for cu in self.code_units] + [5])
@@ -30,12 +29,12 @@ class SummaryReporter(Reporter):
header = (fmt_name % "Name") + " Stmts Miss"
fmt_coverage = fmt_name + "%6d %6d"
if self.branches:
- header += " Branch BrPart"
+ header += " Branch BrMiss"
fmt_coverage += " %6d %6d"
width100 = Numbers.pc_str_width()
header += "%*s" % (width100+4, "Cover")
fmt_coverage += "%%%ds%%%%" % (width100+3,)
- if self.show_missing:
+ if self.config.show_missing:
header += " Missing"
fmt_coverage += " %s"
rule = "-" * len(header) + "\n"
@@ -59,15 +58,19 @@ class SummaryReporter(Reporter):
if self.branches:
args += (nums.n_branches, nums.n_missing_branches)
args += (nums.pc_covered_str,)
- if self.show_missing:
+ if self.config.show_missing:
args += (analysis.missing_formatted(),)
outfile.write(fmt_coverage % args)
total += nums
- except KeyboardInterrupt: # pragma: no cover
+ except KeyboardInterrupt: # pragma: not covered
raise
except:
- if not self.ignore_errors:
+ report_it = not self.config.ignore_errors
+ if report_it:
typ, msg = sys.exc_info()[:2]
+ if typ is NotPython and not cu.should_be_python():
+ report_it = False
+ if report_it:
outfile.write(fmt_err % (cu.name, typ.__name__, msg))
if total.n_files > 1:
@@ -76,6 +79,8 @@ class SummaryReporter(Reporter):
if self.branches:
args += (total.n_branches, total.n_missing_branches)
args += (total.pc_covered_str,)
- if self.show_missing:
+ if self.config.show_missing:
args += ("",)
outfile.write(fmt_coverage % args)
+
+ return total.pc_covered
diff --git a/python/helpers/coverage/templite.py b/python/helpers/coverage/templite.py
index c39e061efd92..e5c0bafefb77 100644
--- a/python/helpers/coverage/templite.py
+++ b/python/helpers/coverage/templite.py
@@ -2,7 +2,53 @@
# Coincidentally named the same as http://code.activestate.com/recipes/496702/
-import re, sys
+import re
+
+from coverage.backward import set # pylint: disable=W0622
+
+
+class CodeBuilder(object):
+ """Build source code conveniently."""
+
+ def __init__(self, indent=0):
+ self.code = []
+ self.indent_amount = indent
+
+ def add_line(self, line):
+ """Add a line of source to the code.
+
+ Don't include indentation or newlines.
+
+ """
+ self.code.append(" " * self.indent_amount)
+ self.code.append(line)
+ self.code.append("\n")
+
+ def add_section(self):
+ """Add a section, a sub-CodeBuilder."""
+ sect = CodeBuilder(self.indent_amount)
+ self.code.append(sect)
+ return sect
+
+ def indent(self):
+ """Increase the current indent for following lines."""
+ self.indent_amount += 4
+
+ def dedent(self):
+ """Decrease the current indent for following lines."""
+ self.indent_amount -= 4
+
+ def __str__(self):
+ return "".join([str(c) for c in self.code])
+
+ def get_function(self, fn_name):
+ """Compile the code, and return the function `fn_name`."""
+ assert self.indent_amount == 0
+ g = {}
+ code_text = str(self)
+ exec(code_text, g)
+ return g[fn_name]
+
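A brief sketch of CodeBuilder in use (the function being built is invented):

# Build, compile, and fetch a tiny function.
code = CodeBuilder()
code.add_line("def add_one(x):")
code.indent()
code.add_line("return x + 1")
code.dedent()
add_one = code.get_function("add_one")
print(add_one(41))  # 42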
class Templite(object):
"""A simple template renderer, for a nano-subset of Django syntax.
@@ -39,53 +85,104 @@ class Templite(object):
for context in contexts:
self.context.update(context)
+ # We construct a function in source form, then compile it and hold onto
+ # it, and execute it to render the template.
+ code = CodeBuilder()
+
+ code.add_line("def render(ctx, dot):")
+ code.indent()
+ vars_code = code.add_section()
+ self.all_vars = set()
+ self.loop_vars = set()
+ code.add_line("result = []")
+ code.add_line("a = result.append")
+ code.add_line("e = result.extend")
+ code.add_line("s = str")
+
+ buffered = []
+ def flush_output():
+ """Force `buffered` to the code builder."""
+ if len(buffered) == 1:
+ code.add_line("a(%s)" % buffered[0])
+ elif len(buffered) > 1:
+ code.add_line("e([%s])" % ",".join(buffered))
+ del buffered[:]
+
# Split the text to form a list of tokens.
toks = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text)
- # Parse the tokens into a nested list of operations. Each item in the
- # list is a tuple with an opcode, and arguments. They'll be
- # interpreted by TempliteEngine.
- #
- # When parsing an action tag with nested content (if, for), the current
- # ops list is pushed onto ops_stack, and the parsing continues in a new
- # ops list that is part of the arguments to the if or for op.
- ops = []
ops_stack = []
for tok in toks:
if tok.startswith('{{'):
- # Expression: ('exp', expr)
- ops.append(('exp', tok[2:-2].strip()))
+ # An expression to evaluate.
+ buffered.append("s(%s)" % self.expr_code(tok[2:-2].strip()))
elif tok.startswith('{#'):
# Comment: ignore it and move on.
continue
elif tok.startswith('{%'):
# Action tag: split into words and parse further.
+ flush_output()
words = tok[2:-2].strip().split()
if words[0] == 'if':
- # If: ('if', (expr, body_ops))
- if_ops = []
+ # An if statement: evaluate the expression to decide whether to emit the body.
assert len(words) == 2
- ops.append(('if', (words[1], if_ops)))
- ops_stack.append(ops)
- ops = if_ops
+ ops_stack.append('if')
+ code.add_line("if %s:" % self.expr_code(words[1]))
+ code.indent()
elif words[0] == 'for':
- # For: ('for', (varname, listexpr, body_ops))
+ # A loop: iterate over expression result.
assert len(words) == 4 and words[2] == 'in'
- for_ops = []
- ops.append(('for', (words[1], words[3], for_ops)))
- ops_stack.append(ops)
- ops = for_ops
+ ops_stack.append('for')
+ self.loop_vars.add(words[1])
+ code.add_line(
+ "for c_%s in %s:" % (
+ words[1],
+ self.expr_code(words[3])
+ )
+ )
+ code.indent()
elif words[0].startswith('end'):
# Endsomething. Pop the ops stack
- ops = ops_stack.pop()
- assert ops[-1][0] == words[0][3:]
+ end_what = words[0][3:]
+ if ops_stack[-1] != end_what:
+ raise SyntaxError("Mismatched end tag: %r" % end_what)
+ ops_stack.pop()
+ code.dedent()
else:
- raise SyntaxError("Don't understand tag %r" % words)
+ raise SyntaxError("Don't understand tag: %r" % words[0])
else:
- ops.append(('lit', tok))
+ # Literal content. If it isn't empty, output it.
+ if tok:
+ buffered.append("%r" % tok)
+ flush_output()
- assert not ops_stack, "Unmatched action tag: %r" % ops_stack[-1][0]
- self.ops = ops
+ for var_name in self.all_vars - self.loop_vars:
+ vars_code.add_line("c_%s = ctx[%r]" % (var_name, var_name))
+
+ if ops_stack:
+ raise SyntaxError("Unmatched action tag: %r" % ops_stack[-1])
+
+ code.add_line("return ''.join(result)")
+ code.dedent()
+ self.render_function = code.get_function('render')
+
+ def expr_code(self, expr):
+ """Generate a Python expression for `expr`."""
+ if "|" in expr:
+ pipes = expr.split("|")
+ code = self.expr_code(pipes[0])
+ for func in pipes[1:]:
+ self.all_vars.add(func)
+ code = "c_%s(%s)" % (func, code)
+ elif "." in expr:
+ dots = expr.split(".")
+ code = self.expr_code(dots[0])
+ args = [repr(d) for d in dots[1:]]
+ code = "dot(%s, %s)" % (code, ", ".join(args))
+ else:
+ self.all_vars.add(expr)
+ code = "c_%s" % expr
+ return code
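For reference, expr_code maps template expressions to Python source roughly as follows; c_* and dot are the locals of the generated render function shown above:

# Sketch of expr_code translations (inputs and outputs as strings):
#   "name"             -> "c_name"
#   "user.name"        -> "dot(c_user, 'name')"
#   "name|upper"       -> "c_upper(c_name)"
#   "user.name|upper"  -> "c_upper(dot(c_user, 'name'))"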
def render(self, context=None):
"""Render this template by applying it to `context`.
@@ -97,70 +194,15 @@ class Templite(object):
ctx = dict(self.context)
if context:
ctx.update(context)
-
- # Run it through an engine, and return the result.
- engine = _TempliteEngine(ctx)
- engine.execute(self.ops)
- return "".join(engine.result)
-
-
-class _TempliteEngine(object):
- """Executes Templite objects to produce strings."""
- def __init__(self, context):
- self.context = context
- self.result = []
-
- def execute(self, ops):
- """Execute `ops` in the engine.
-
- Called recursively for the bodies of if's and loops.
-
- """
- for op, args in ops:
- if op == 'lit':
- self.result.append(args)
- elif op == 'exp':
- try:
- self.result.append(str(self.evaluate(args)))
- except:
- exc_class, exc, _ = sys.exc_info()
- new_exc = exc_class("Couldn't evaluate {{ %s }}: %s"
- % (args, exc))
- raise new_exc
- elif op == 'if':
- expr, body = args
- if self.evaluate(expr):
- self.execute(body)
- elif op == 'for':
- var, lis, body = args
- vals = self.evaluate(lis)
- for val in vals:
- self.context[var] = val
- self.execute(body)
- else:
- raise AssertionError("TempliteEngine doesn't grok op %r" % op)
-
- def evaluate(self, expr):
- """Evaluate an expression.
-
- `expr` can have pipes and dots to indicate data access and filtering.
-
- """
- if "|" in expr:
- pipes = expr.split("|")
- value = self.evaluate(pipes[0])
- for func in pipes[1:]:
- value = self.evaluate(func)(value)
- elif "." in expr:
- dots = expr.split('.')
- value = self.evaluate(dots[0])
- for dot in dots[1:]:
- try:
- value = getattr(value, dot)
- except AttributeError:
- value = value[dot]
- if hasattr(value, '__call__'):
- value = value()
- else:
- value = self.context[expr]
+ return self.render_function(ctx, self.do_dots)
+
+ def do_dots(self, value, *dots):
+ """Evaluate dotted expressions at runtime."""
+ for dot in dots:
+ try:
+ value = getattr(value, dot)
+ except AttributeError:
+ value = value[dot]
+ if hasattr(value, '__call__'):
+ value = value()
return value
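End to end, the compile-to-Python approach is exercised like this (a sketch using only the API above; the template text and context are invented):

# {{...}} goes through expr_code; dotted access resolves via do_dots at render time.
t = Templite("Hello {{name}}, you have {% for m in msgs %}{{m}} {% endfor %}")
print(t.render({'name': 'Ned', 'msgs': ['a', 'b']}))  # Hello Ned, you have a b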
diff --git a/python/helpers/coverage/tracer.c b/python/helpers/coverage/tracer.c
new file mode 100644
index 000000000000..97dd113b8b3a
--- /dev/null
+++ b/python/helpers/coverage/tracer.c
@@ -0,0 +1,730 @@
+/* C-based Tracer for Coverage. */
+
+#include "Python.h"
+#include "compile.h" /* in 2.3, this wasn't part of Python.h */
+#include "eval.h" /* or this. */
+#include "structmember.h"
+#include "frameobject.h"
+
+/* Compile-time debugging helpers */
+#undef WHAT_LOG /* Define to log the WHAT params in the trace function. */
+#undef TRACE_LOG /* Define to log our bookkeeping. */
+#undef COLLECT_STATS /* Collect counters: stats are printed when tracer is stopped. */
+
+#if COLLECT_STATS
+#define STATS(x) x
+#else
+#define STATS(x)
+#endif
+
+/* Py 2.x and 3.x compatibility */
+
+#ifndef Py_TYPE
+#define Py_TYPE(o) (((PyObject*)(o))->ob_type)
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+
+#define MyText_Type PyUnicode_Type
+#define MyText_Check(o) PyUnicode_Check(o)
+#define MyText_AS_BYTES(o) PyUnicode_AsASCIIString(o)
+#define MyText_AS_STRING(o) PyBytes_AS_STRING(o)
+#define MyInt_FromLong(l) PyLong_FromLong(l)
+
+#define MyType_HEAD_INIT PyVarObject_HEAD_INIT(NULL, 0)
+
+#else
+
+#define MyText_Type PyString_Type
+#define MyText_Check(o) PyString_Check(o)
+#define MyText_AS_BYTES(o) (Py_INCREF(o), o)
+#define MyText_AS_STRING(o) PyString_AS_STRING(o)
+#define MyInt_FromLong(l) PyInt_FromLong(l)
+
+#define MyType_HEAD_INIT PyObject_HEAD_INIT(NULL) 0,
+
+#endif /* Py3k */
+
+/* The values returned to indicate ok or error. */
+#define RET_OK 0
+#define RET_ERROR -1
+
+/* An entry on the data stack. For each call frame, we need to record the
+ dictionary to capture data, and the last line number executed in that
+ frame.
+*/
+typedef struct {
+ PyObject * file_data; /* PyMem_Malloc'ed, a borrowed ref. */
+ int last_line;
+} DataStackEntry;
+
+/* The CTracer type. */
+
+typedef struct {
+ PyObject_HEAD
+
+ /* Python objects manipulated directly by the Collector class. */
+ PyObject * should_trace;
+ PyObject * warn;
+ PyObject * data;
+ PyObject * should_trace_cache;
+ PyObject * arcs;
+
+ /* Has the tracer been started? */
+ int started;
+ /* Are we tracing arcs, or just lines? */
+ int tracing_arcs;
+
+ /*
+ The data stack is a stack of dictionaries. Each dictionary collects
+ data for a single source file. The data stack parallels the call stack:
+ each call pushes the new frame's file data onto the data stack, and each
+ return pops file data off.
+
+ The file data is a dictionary whose form depends on the tracing options.
+ If tracing arcs, the keys are line number pairs. If not tracing arcs,
+ the keys are line numbers. In both cases, the value is irrelevant
+ (None).
+ */
+ /* The index of the last-used entry in data_stack. */
+ int depth;
+ /* The file data at each level, or NULL if not recording. */
+ DataStackEntry * data_stack;
+ int data_stack_alloc; /* number of entries allocated at data_stack. */
+
+ /* The current file_data dictionary. Borrowed. */
+ PyObject * cur_file_data;
+
+ /* The line number of the last line recorded, for tracing arcs.
+ -1 means there was no previous line, as when entering a code object.
+ */
+ int last_line;
+
+ /* The parent frame for the last exception event, to fix missing returns. */
+ PyFrameObject * last_exc_back;
+ int last_exc_firstlineno;
+
+#if COLLECT_STATS
+ struct {
+ unsigned int calls;
+ unsigned int lines;
+ unsigned int returns;
+ unsigned int exceptions;
+ unsigned int others;
+ unsigned int new_files;
+ unsigned int missed_returns;
+ unsigned int stack_reallocs;
+ unsigned int errors;
+ } stats;
+#endif /* COLLECT_STATS */
+} CTracer;
+
+#define STACK_DELTA 100
+
+static int
+CTracer_init(CTracer *self, PyObject *args_unused, PyObject *kwds_unused)
+{
+#if COLLECT_STATS
+ self->stats.calls = 0;
+ self->stats.lines = 0;
+ self->stats.returns = 0;
+ self->stats.exceptions = 0;
+ self->stats.others = 0;
+ self->stats.new_files = 0;
+ self->stats.missed_returns = 0;
+ self->stats.stack_reallocs = 0;
+ self->stats.errors = 0;
+#endif /* COLLECT_STATS */
+
+ self->should_trace = NULL;
+ self->warn = NULL;
+ self->data = NULL;
+ self->should_trace_cache = NULL;
+ self->arcs = NULL;
+
+ self->started = 0;
+ self->tracing_arcs = 0;
+
+ self->depth = -1;
+ self->data_stack = PyMem_Malloc(STACK_DELTA*sizeof(DataStackEntry));
+ if (self->data_stack == NULL) {
+ STATS( self->stats.errors++; )
+ PyErr_NoMemory();
+ return RET_ERROR;
+ }
+ self->data_stack_alloc = STACK_DELTA;
+
+ self->cur_file_data = NULL;
+ self->last_line = -1;
+
+ self->last_exc_back = NULL;
+
+ return RET_OK;
+}
+
+static void
+CTracer_dealloc(CTracer *self)
+{
+ if (self->started) {
+ PyEval_SetTrace(NULL, NULL);
+ }
+
+ Py_XDECREF(self->should_trace);
+ Py_XDECREF(self->warn);
+ Py_XDECREF(self->data);
+ Py_XDECREF(self->should_trace_cache);
+
+ PyMem_Free(self->data_stack);
+
+ Py_TYPE(self)->tp_free((PyObject*)self);
+}
+
+#if TRACE_LOG
+static const char *
+indent(int n)
+{
+ static const char * spaces =
+ " "
+ " "
+ " "
+ " "
+ ;
+ return spaces + strlen(spaces) - n*2;
+}
+
+static int logging = 0;
+/* Set these constants to be a file substring and line number to start logging. */
+static const char * start_file = "tests/views";
+static int start_line = 27;
+
+static void
+showlog(int depth, int lineno, PyObject * filename, const char * msg)
+{
+ if (logging) {
+ printf("%s%3d ", indent(depth), depth);
+ if (lineno) {
+ printf("%4d", lineno);
+ }
+ else {
+ printf(" ");
+ }
+ if (filename) {
+ PyObject *ascii = MyText_AS_BYTES(filename);
+ printf(" %s", MyText_AS_STRING(ascii));
+ Py_DECREF(ascii);
+ }
+ if (msg) {
+ printf(" %s", msg);
+ }
+ printf("\n");
+ }
+}
+
+#define SHOWLOG(a,b,c,d) showlog(a,b,c,d)
+#else
+#define SHOWLOG(a,b,c,d)
+#endif /* TRACE_LOG */
+
+#if WHAT_LOG
+static const char * what_sym[] = {"CALL", "EXC ", "LINE", "RET "};
+#endif
+
+/* Record a pair of integers in self->cur_file_data. */
+static int
+CTracer_record_pair(CTracer *self, int l1, int l2)
+{
+ int ret = RET_OK;
+
+ PyObject * t = Py_BuildValue("(ii)", l1, l2);
+ if (t != NULL) {
+ if (PyDict_SetItem(self->cur_file_data, t, Py_None) < 0) {
+ STATS( self->stats.errors++; )
+ ret = RET_ERROR;
+ }
+ Py_DECREF(t);
+ }
+ else {
+ STATS( self->stats.errors++; )
+ ret = RET_ERROR;
+ }
+ return ret;
+}
+
+/*
+ * The Trace Function
+ */
+static int
+CTracer_trace(CTracer *self, PyFrameObject *frame, int what, PyObject *arg_unused)
+{
+ int ret = RET_OK;
+ PyObject * filename = NULL;
+ PyObject * tracename = NULL;
+ #if WHAT_LOG || TRACE_LOG
+ PyObject * ascii = NULL;
+ #endif
+
+ #if WHAT_LOG
+ if (what < sizeof(what_sym)/sizeof(const char *)) { /* '<', not '<=': avoid indexing past what_sym */
+ ascii = MyText_AS_BYTES(frame->f_code->co_filename);
+ printf("trace: %s @ %s %d\n", what_sym[what], MyText_AS_STRING(ascii), frame->f_lineno);
+ Py_DECREF(ascii);
+ }
+ #endif
+
+ #if TRACE_LOG
+ ascii = MyText_AS_BYTES(frame->f_code->co_filename);
+ if (strstr(MyText_AS_STRING(ascii), start_file) && frame->f_lineno == start_line) {
+ logging = 1;
+ }
+ Py_DECREF(ascii);
+ #endif
+
+ /* See below for details on missing-return detection. */
+ if (self->last_exc_back) {
+ if (frame == self->last_exc_back) {
+ /* Looks like someone forgot to send a return event. We'll clear
+ the exception state and do the RETURN code here. Notice that the
+ frame we have in hand here is not the correct frame for the RETURN,
+ that frame is gone. Our handling for RETURN doesn't need the
+ actual frame, but we do log it, so that will look a little off if
+ you're looking at the detailed log.
+
+ If someday we need to examine the frame when doing RETURN, then
+ we'll need to keep more of the missed frame's state.
+ */
+ STATS( self->stats.missed_returns++; )
+ if (self->depth >= 0) {
+ if (self->tracing_arcs && self->cur_file_data) {
+ if (CTracer_record_pair(self, self->last_line, -self->last_exc_firstlineno) < 0) {
+ return RET_ERROR;
+ }
+ }
+ SHOWLOG(self->depth, frame->f_lineno, frame->f_code->co_filename, "missedreturn");
+ self->cur_file_data = self->data_stack[self->depth].file_data;
+ self->last_line = self->data_stack[self->depth].last_line;
+ self->depth--;
+ }
+ }
+ self->last_exc_back = NULL;
+ }
+
+
+ switch (what) {
+ case PyTrace_CALL: /* 0 */
+ STATS( self->stats.calls++; )
+ /* Grow the stack. */
+ self->depth++;
+ if (self->depth >= self->data_stack_alloc) {
+ STATS( self->stats.stack_reallocs++; )
+ /* We've outgrown our data_stack array: make it bigger. */
+ int bigger = self->data_stack_alloc + STACK_DELTA;
+ DataStackEntry * bigger_data_stack = PyMem_Realloc(self->data_stack, bigger * sizeof(DataStackEntry));
+ if (bigger_data_stack == NULL) {
+ STATS( self->stats.errors++; )
+ PyErr_NoMemory();
+ self->depth--;
+ return RET_ERROR;
+ }
+ self->data_stack = bigger_data_stack;
+ self->data_stack_alloc = bigger;
+ }
+
+ /* Push the current state on the stack. */
+ self->data_stack[self->depth].file_data = self->cur_file_data;
+ self->data_stack[self->depth].last_line = self->last_line;
+
+ /* Check if we should trace this line. */
+ filename = frame->f_code->co_filename;
+ tracename = PyDict_GetItem(self->should_trace_cache, filename);
+ if (tracename == NULL) {
+ STATS( self->stats.new_files++; )
+ /* We've never considered this file before. */
+ /* Ask should_trace about it. */
+ PyObject * args = Py_BuildValue("(OO)", filename, frame);
+ tracename = PyObject_Call(self->should_trace, args, NULL);
+ Py_DECREF(args);
+ if (tracename == NULL) {
+ /* An error occurred inside should_trace. */
+ STATS( self->stats.errors++; )
+ return RET_ERROR;
+ }
+ if (PyDict_SetItem(self->should_trace_cache, filename, tracename) < 0) {
+ STATS( self->stats.errors++; )
+ return RET_ERROR;
+ }
+ }
+ else {
+ Py_INCREF(tracename);
+ }
+
+ /* If tracename is a string, then we're supposed to trace. */
+ if (MyText_Check(tracename)) {
+ PyObject * file_data = PyDict_GetItem(self->data, tracename);
+ if (file_data == NULL) {
+ file_data = PyDict_New();
+ if (file_data == NULL) {
+ STATS( self->stats.errors++; )
+ return RET_ERROR;
+ }
+ ret = PyDict_SetItem(self->data, tracename, file_data);
+ Py_DECREF(file_data);
+ if (ret < 0) {
+ STATS( self->stats.errors++; )
+ return RET_ERROR;
+ }
+ }
+ self->cur_file_data = file_data;
+ /* Make the frame right in case settrace(gettrace()) happens. */
+ Py_INCREF(self);
+ frame->f_trace = (PyObject*)self;
+ SHOWLOG(self->depth, frame->f_lineno, filename, "traced");
+ }
+ else {
+ self->cur_file_data = NULL;
+ SHOWLOG(self->depth, frame->f_lineno, filename, "skipped");
+ }
+
+ Py_DECREF(tracename);
+
+ self->last_line = -1;
+ break;
+
+ case PyTrace_RETURN: /* 3 */
+ STATS( self->stats.returns++; )
+ /* A near-copy of this code is above in the missing-return handler. */
+ if (self->depth >= 0) {
+ if (self->tracing_arcs && self->cur_file_data) {
+ int first = frame->f_code->co_firstlineno;
+ if (CTracer_record_pair(self, self->last_line, -first) < 0) {
+ return RET_ERROR;
+ }
+ }
+
+ SHOWLOG(self->depth, frame->f_lineno, frame->f_code->co_filename, "return");
+ self->cur_file_data = self->data_stack[self->depth].file_data;
+ self->last_line = self->data_stack[self->depth].last_line;
+ self->depth--;
+ }
+ break;
+
+ case PyTrace_LINE: /* 2 */
+ STATS( self->stats.lines++; )
+ if (self->depth >= 0) {
+ SHOWLOG(self->depth, frame->f_lineno, frame->f_code->co_filename, "line");
+ if (self->cur_file_data) {
+ /* We're tracing in this frame: record something. */
+ if (self->tracing_arcs) {
+ /* Tracing arcs: key is (last_line,this_line). */
+ if (CTracer_record_pair(self, self->last_line, frame->f_lineno) < 0) {
+ return RET_ERROR;
+ }
+ }
+ else {
+ /* Tracing lines: key is simply this_line. */
+ PyObject * this_line = MyInt_FromLong(frame->f_lineno);
+ if (this_line == NULL) {
+ STATS( self->stats.errors++; )
+ return RET_ERROR;
+ }
+ ret = PyDict_SetItem(self->cur_file_data, this_line, Py_None);
+ Py_DECREF(this_line);
+ if (ret < 0) {
+ STATS( self->stats.errors++; )
+ return RET_ERROR;
+ }
+ }
+ }
+ self->last_line = frame->f_lineno;
+ }
+ break;
+
+ case PyTrace_EXCEPTION:
+ /* Some code (Python 2.3, and pyexpat anywhere) fires an exception event
+ without a return event. To detect that, we'll keep a copy of the
+ parent frame for an exception event. If the next event is in that
+ frame, then we must have returned without a return event. We can
+ synthesize the missing event then.
+
+ Python itself fixed this problem in 2.4. Pyexpat still has the bug.
+ I've reported the problem with pyexpat as http://bugs.python.org/issue6359 .
+ If it gets fixed, this code should still work properly. Maybe some day
+ the bug will be fixed everywhere coverage.py is supported, and we can
+ remove this missing-return detection.
+
+ More about this fix: http://nedbatchelder.com/blog/200907/a_nasty_little_bug.html
+ */
+ STATS( self->stats.exceptions++; )
+ self->last_exc_back = frame->f_back;
+ self->last_exc_firstlineno = frame->f_code->co_firstlineno;
+ break;
+
+ default:
+ STATS( self->stats.others++; )
+ break;
+ }
+
+ return RET_OK;
+}
+
+/*
+ * Python has two ways to set the trace function: sys.settrace(fn), which
+ * takes a Python callable, and PyEval_SetTrace(func, obj), which takes
+ * a C function and a Python object. The way these work together is that
+ * sys.settrace(pyfn) calls PyEval_SetTrace(builtin_func, pyfn), using the
+ * Python callable as the object in PyEval_SetTrace. So sys.gettrace()
+ * simply returns the Python object used as the second argument to
+ * PyEval_SetTrace. That means sys.gettrace() will return our self parameter,
+ * which must therefore be callable to be used with sys.settrace().
+ *
+ * So we make our self callable, equivalent to invoking our trace function.
+ *
+ * To help with the process of replaying stored frames, this function has an
+ * optional keyword argument:
+ *
+ * def CTracer_call(frame, event, arg, lineno=0)
+ *
+ * If provided, the lineno argument is used as the line number, and the
+ * frame's f_lineno member is ignored.
+ */
+static PyObject *
+CTracer_call(CTracer *self, PyObject *args, PyObject *kwds)
+{
+ PyFrameObject *frame;
+ PyObject *what_str;
+ PyObject *arg;
+ int lineno = 0;
+ int what;
+ int orig_lineno;
+ PyObject *ret = NULL;
+
+ static char *what_names[] = {
+ "call", "exception", "line", "return",
+ "c_call", "c_exception", "c_return",
+ NULL
+ };
+
+ #if WHAT_LOG
+ printf("pytrace\n");
+ #endif
+
+ static char *kwlist[] = {"frame", "event", "arg", "lineno", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!O!O|i:Tracer_call", kwlist,
+ &PyFrame_Type, &frame, &MyText_Type, &what_str, &arg, &lineno)) {
+ goto done;
+ }
+
+ /* In Python, the what argument is a string, we need to find an int
+ for the C function. */
+ for (what = 0; what_names[what]; what++) {
+ PyObject *ascii = MyText_AS_BYTES(what_str);
+ int should_break = !strcmp(MyText_AS_STRING(ascii), what_names[what]);
+ Py_DECREF(ascii);
+ if (should_break) {
+ break;
+ }
+ }
+
+ /* Save off the frame's lineno, and use the forced one, if provided. */
+ orig_lineno = frame->f_lineno;
+ if (lineno > 0) {
+ frame->f_lineno = lineno;
+ }
+
+ /* Invoke the C function, and return ourselves. */
+ if (CTracer_trace(self, frame, what, arg) == RET_OK) {
+ Py_INCREF(self);
+ ret = (PyObject *)self;
+ }
+
+ /* Clean up. */
+ frame->f_lineno = orig_lineno;
+
+done:
+ return ret;
+}
+
+static PyObject *
+CTracer_start(CTracer *self, PyObject *args_unused)
+{
+ PyEval_SetTrace((Py_tracefunc)CTracer_trace, (PyObject*)self);
+ self->started = 1;
+ self->tracing_arcs = self->arcs && PyObject_IsTrue(self->arcs);
+ self->last_line = -1;
+
+ /* start() returns a trace function usable with sys.settrace() */
+ Py_INCREF(self);
+ return (PyObject *)self;
+}
+
+static PyObject *
+CTracer_stop(CTracer *self, PyObject *args_unused)
+{
+ if (self->started) {
+ PyEval_SetTrace(NULL, NULL);
+ self->started = 0;
+ }
+
+ return Py_BuildValue("");
+}
+
+static PyObject *
+CTracer_get_stats(CTracer *self)
+{
+#if COLLECT_STATS
+ return Py_BuildValue(
+ "{sI,sI,sI,sI,sI,sI,sI,sI,si,sI}",
+ "calls", self->stats.calls,
+ "lines", self->stats.lines,
+ "returns", self->stats.returns,
+ "exceptions", self->stats.exceptions,
+ "others", self->stats.others,
+ "new_files", self->stats.new_files,
+ "missed_returns", self->stats.missed_returns,
+ "stack_reallocs", self->stats.stack_reallocs,
+ "stack_alloc", self->data_stack_alloc,
+ "errors", self->stats.errors
+ );
+#else
+ return Py_BuildValue("");
+#endif /* COLLECT_STATS */
+}
+
+static PyMemberDef
+CTracer_members[] = {
+ { "should_trace", T_OBJECT, offsetof(CTracer, should_trace), 0,
+ PyDoc_STR("Function indicating whether to trace a file.") },
+
+ { "warn", T_OBJECT, offsetof(CTracer, warn), 0,
+ PyDoc_STR("Function for issuing warnings.") },
+
+ { "data", T_OBJECT, offsetof(CTracer, data), 0,
+ PyDoc_STR("The raw dictionary of trace data.") },
+
+ { "should_trace_cache", T_OBJECT, offsetof(CTracer, should_trace_cache), 0,
+ PyDoc_STR("Dictionary caching should_trace results.") },
+
+ { "arcs", T_OBJECT, offsetof(CTracer, arcs), 0,
+ PyDoc_STR("Should we trace arcs, or just lines?") },
+
+ { NULL }
+};
+
+static PyMethodDef
+CTracer_methods[] = {
+ { "start", (PyCFunction) CTracer_start, METH_VARARGS,
+ PyDoc_STR("Start the tracer") },
+
+ { "stop", (PyCFunction) CTracer_stop, METH_VARARGS,
+ PyDoc_STR("Stop the tracer") },
+
+ { "get_stats", (PyCFunction) CTracer_get_stats, METH_VARARGS,
+ PyDoc_STR("Get statistics about the tracing") },
+
+ { NULL }
+};
+
+static PyTypeObject
+CTracerType = {
+ MyType_HEAD_INIT
+ "coverage.CTracer", /*tp_name*/
+ sizeof(CTracer), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ (destructor)CTracer_dealloc, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ 0, /*tp_compare*/
+ 0, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash */
+ (ternaryfunc)CTracer_call, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
+ "CTracer objects", /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ CTracer_methods, /* tp_methods */
+ CTracer_members, /* tp_members */
+ 0, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ (initproc)CTracer_init, /* tp_init */
+ 0, /* tp_alloc */
+ 0, /* tp_new */
+};
+
+/* Module definition */
+
+#define MODULE_DOC PyDoc_STR("Fast coverage tracer.")
+
+#if PY_MAJOR_VERSION >= 3
+
+static PyModuleDef
+moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "coverage.tracer",
+ MODULE_DOC,
+ -1,
+ NULL, /* methods */
+ NULL,
+ NULL, /* traverse */
+ NULL, /* clear */
+ NULL
+};
+
+
+PyObject *
+PyInit_tracer(void)
+{
+ PyObject * mod = PyModule_Create(&moduledef);
+ if (mod == NULL) {
+ return NULL;
+ }
+
+ CTracerType.tp_new = PyType_GenericNew;
+ if (PyType_Ready(&CTracerType) < 0) {
+ Py_DECREF(mod);
+ return NULL;
+ }
+
+ Py_INCREF(&CTracerType);
+ PyModule_AddObject(mod, "CTracer", (PyObject *)&CTracerType);
+
+ return mod;
+}
+
+#else
+
+void
+inittracer(void)
+{
+ PyObject * mod;
+
+ mod = Py_InitModule3("coverage.tracer", NULL, MODULE_DOC);
+ if (mod == NULL) {
+ return;
+ }
+
+ CTracerType.tp_new = PyType_GenericNew;
+ if (PyType_Ready(&CTracerType) < 0) {
+ return;
+ }
+
+ Py_INCREF(&CTracerType);
+ PyModule_AddObject(mod, "CTracer", (PyObject *)&CTracerType);
+}
+
+#endif /* Py3k */
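From the Python side, a collector drives this type roughly as follows; this is a hedged sketch based only on the members and methods defined above (coverage.py's own Collector supplies a real should_trace):

# Hypothetical wiring of the C tracer.
from coverage.tracer import CTracer

tracer = CTracer()
tracer.data = {}                    # filename -> {line or (l1, l2): None}
tracer.should_trace_cache = {}
tracer.should_trace = lambda filename, frame: filename  # trace everything
tracer.arcs = False                 # record lines, not arcs
tracer.start()                      # installs the C trace function
# ... run the code under measurement ...
tracer.stop()
print(tracer.data)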
diff --git a/python/helpers/coverage/tracer.pyd b/python/helpers/coverage/tracer.pyd
deleted file mode 100644
index a13aa032abe1..000000000000
--- a/python/helpers/coverage/tracer.pyd
+++ /dev/null
Binary files differ
diff --git a/python/helpers/coverage/version.py b/python/helpers/coverage/version.py
new file mode 100644
index 000000000000..a43bde8023e3
--- /dev/null
+++ b/python/helpers/coverage/version.py
@@ -0,0 +1,9 @@
+"""The version and URL for coverage.py"""
+# This file is exec'ed in setup.py, don't import anything!
+
+__version__ = "3.7.1" # see detailed history in CHANGES.txt
+
+__url__ = "http://nedbatchelder.com/code/coverage"
+if max(__version__).isalpha():
+ # For pre-releases, use a version-specific URL.
+ __url__ += "/" + __version__
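The max() trick works because '.' and the digits sort below letters, so the greatest character of a final release's version string is a digit, while any pre-release tag contributes a letter:

# max("3.7.1") == '7' (not alpha): plain URL.
# max("3.7.1b1") == 'b' (alpha): the version-specific URL is used.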
diff --git a/python/helpers/coverage/xmlreport.py b/python/helpers/coverage/xmlreport.py
index 5f6cc87e2fef..26ac02ad13d0 100644
--- a/python/helpers/coverage/xmlreport.py
+++ b/python/helpers/coverage/xmlreport.py
@@ -4,7 +4,7 @@ import os, sys, time
import xml.dom.minidom
from coverage import __url__, __version__
-from coverage.backward import sorted # pylint: disable=W0622
+from coverage.backward import sorted, rpartition # pylint: disable=W0622
from coverage.report import Reporter
def rate(hit, num):
@@ -15,20 +15,19 @@ def rate(hit, num):
class XmlReporter(Reporter):
"""A reporter for writing Cobertura-style XML coverage results."""
- def __init__(self, coverage, ignore_errors=False):
- super(XmlReporter, self).__init__(coverage, ignore_errors)
+ def __init__(self, coverage, config):
+ super(XmlReporter, self).__init__(coverage, config)
self.packages = None
self.xml_out = None
self.arcs = coverage.data.has_arcs()
- def report(self, morfs, outfile=None, config=None):
+ def report(self, morfs, outfile=None):
"""Generate a Cobertura-compatible XML report for `morfs`.
`morfs` is a list of modules or filenames.
- `outfile` is a file object to write the XML to. `config` is a
- CoverageConfig instance.
+ `outfile` is a file object to write the XML to.
"""
# Initial setup.
@@ -54,7 +53,7 @@ class XmlReporter(Reporter):
# Call xml_file for each file in the data.
self.packages = {}
- self.report_files(self.xml_file, morfs, config)
+ self.report_files(self.xml_file, morfs)
lnum_tot, lhits_tot = 0, 0
bnum_tot, bhits_tot = 0, 0
@@ -85,14 +84,23 @@ class XmlReporter(Reporter):
# Use the DOM to write the output file.
outfile.write(self.xml_out.toprettyxml())
+ # Return the total percentage.
+ denom = lnum_tot + bnum_tot
+ if denom == 0:
+ pct = 0.0
+ else:
+ pct = 100.0 * (lhits_tot + bhits_tot) / denom
+ return pct
+
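The returned percentage folds line and branch hits together. With invented totals, 90 of 100 lines and 10 of 20 branches hit gives:

# pct = 100.0 * (90 + 10) / (100 + 20)  # ~83.3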
def xml_file(self, cu, analysis):
"""Add to the XML report for a single file."""
# Create the 'lines' and 'package' XML elements, which
# are populated later. Note that a package == a directory.
- dirname, fname = os.path.split(cu.name)
- dirname = dirname or '.'
- package = self.packages.setdefault(dirname, [ {}, 0, 0, 0, 0 ])
+ package_name = rpartition(cu.name, ".")[0]
+ className = cu.name
+
+ package = self.packages.setdefault(package_name, [{}, 0, 0, 0, 0])
xclass = self.xml_out.createElement("class")
@@ -100,22 +108,22 @@ class XmlReporter(Reporter):
xlines = self.xml_out.createElement("lines")
xclass.appendChild(xlines)
- className = fname.replace('.', '_')
+
xclass.setAttribute("name", className)
- ext = os.path.splitext(cu.filename)[1]
- xclass.setAttribute("filename", cu.name + ext)
+ filename = cu.file_locator.relative_filename(cu.filename)
+ xclass.setAttribute("filename", filename.replace("\\", "/"))
xclass.setAttribute("complexity", "0")
branch_stats = analysis.branch_stats()
# For each statement, create an XML 'line' element.
- for line in analysis.statements:
+ for line in sorted(analysis.statements):
xline = self.xml_out.createElement("line")
xline.setAttribute("number", str(line))
# Q: can we get info about the number of times a statement is
# executed? If so, that should be recorded here.
- xline.setAttribute("hits", str(int(not line in analysis.missing)))
+ xline.setAttribute("hits", str(int(line not in analysis.missing)))
if self.arcs:
if line in branch_stats:
diff --git a/python/helpers/generator3.py b/python/helpers/generator3.py
index 50e911923a43..d09f6d918e18 100644
--- a/python/helpers/generator3.py
+++ b/python/helpers/generator3.py
@@ -161,6 +161,9 @@ def list_sources(paths):
path = os.path.normpath(path)
+ if path.endswith('.egg') and os.path.isfile(path):
+ say("%s\t%s\t%d", path, path, os.path.getsize(path))
+
for root, files in walk_python_path(path):
for name in files:
if name.endswith('.py'):
diff --git a/python/helpers/pep8.py b/python/helpers/pep8.py
index 2ce7554840cc..f605f189fab3 100644
--- a/python/helpers/pep8.py
+++ b/python/helpers/pep8.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
# pep8.py - Check Python source code formatting, according to PEP 8
# Copyright (C) 2006-2009 Johann C. Rocholl <johann@rocholl.net>
-# Copyright (C) 2009-2013 Florent Xicluna <florent.xicluna@gmail.com>
+# Copyright (C) 2009-2014 Florent Xicluna <florent.xicluna@gmail.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
@@ -24,8 +24,7 @@
# SOFTWARE.
r"""
-Check Python source code formatting, according to PEP 8:
-http://www.python.org/dev/peps/pep-0008/
+Check Python source code formatting, according to PEP 8.
For usage and a list of options, try this:
$ python pep8.py -h
@@ -45,7 +44,9 @@ W warnings
700 statements
900 syntax error
"""
-__version__ = '1.4.5a0'
+from __future__ import with_statement
+
+__version__ = '1.5.7'
import os
import sys
@@ -63,13 +64,13 @@ except ImportError:
from ConfigParser import RawConfigParser
DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__'
-DEFAULT_IGNORE = 'E226,E24'
+DEFAULT_IGNORE = 'E123,E226,E24'
if sys.platform == 'win32':
DEFAULT_CONFIG = os.path.expanduser(r'~\.pep8')
else:
DEFAULT_CONFIG = os.path.join(os.getenv('XDG_CONFIG_HOME') or
os.path.expanduser('~/.config'), 'pep8')
-PROJECT_CONFIG = ('.pep8', 'tox.ini', 'setup.cfg')
+PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep8')
TESTSUITE_PATH = os.path.join(os.path.dirname(__file__), 'testsuite')
MAX_LINE_LENGTH = 79
REPORT_FORMAT = {
@@ -87,18 +88,21 @@ WS_NEEDED_OPERATORS = frozenset([
'**=', '*=', '/=', '//=', '+=', '-=', '!=', '<>', '<', '>',
'%=', '^=', '&=', '|=', '==', '<=', '>=', '<<=', '>>=', '='])
WHITESPACE = frozenset(' \t')
-SKIP_TOKENS = frozenset([tokenize.COMMENT, tokenize.NL, tokenize.NEWLINE,
- tokenize.INDENT, tokenize.DEDENT])
+NEWLINE = frozenset([tokenize.NL, tokenize.NEWLINE])
+SKIP_TOKENS = NEWLINE.union([tokenize.INDENT, tokenize.DEDENT])
+# ERRORTOKEN is triggered by backticks in Python 3
+SKIP_COMMENTS = SKIP_TOKENS.union([tokenize.COMMENT, tokenize.ERRORTOKEN])
BENCHMARK_KEYS = ['directories', 'files', 'logical lines', 'physical lines']
INDENT_REGEX = re.compile(r'([ \t]*)')
-RAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*(,)')
-RERAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,\s*\w+\s*,\s*\w+')
+RAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,')
+RERAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,.*,\s*\w+\s*$')
ERRORCODE_REGEX = re.compile(r'\b[A-Z]\d{3}\b')
DOCSTRING_REGEX = re.compile(r'u?r?["\']')
EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]')
WHITESPACE_AFTER_COMMA_REGEX = re.compile(r'[,;:]\s*(?: |\t)')
COMPARE_SINGLETON_REGEX = re.compile(r'([=!]=)\s*(None|False|True)')
+COMPARE_NEGATIVE_REGEX = re.compile(r'\b(not)\s+[^[({ ]+\s+(in|is)\s')
COMPARE_TYPE_REGEX = re.compile(r'(?:[=!]=|is(?:\s+not)?)\s*type(?:s.\w+Type'
r'|\s*\(\s*([^)]*[^ )])\s*\))')
KEYWORD_REGEX = re.compile(r'(\s*)\b(?:%s)\b(\s*)' % r'|'.join(KEYWORDS))
@@ -117,8 +121,7 @@ COMMENT_WITH_NL = tokenize.generate_tokens(['#\n'].pop).send(None)[1] == '#\n'
def tabs_or_spaces(physical_line, indent_char):
- r"""
- Never mix tabs and spaces.
+ r"""Never mix tabs and spaces.
The most popular way of indenting Python is with spaces only. The
second-most popular way is with tabs only. Code indented with a mixture
@@ -137,9 +140,7 @@ def tabs_or_spaces(physical_line, indent_char):
def tabs_obsolete(physical_line):
- r"""
- For new projects, spaces-only are strongly recommended over tabs. Most
- editors have features that make this easy to do.
+ r"""For new projects, spaces-only are strongly recommended over tabs.
Okay: if True:\n return
W191: if True:\n\treturn
@@ -150,16 +151,7 @@ def tabs_obsolete(physical_line):
def trailing_whitespace(physical_line):
- r"""
- JCR: Trailing whitespace is superfluous.
- FBM: Except when it occurs as part of a blank line (i.e. the line is
- nothing but whitespace). According to Python docs[1] a line with only
- whitespace is considered a blank line, and is to be ignored. However,
- matching a blank line to its indentation level avoids mistakenly
- terminating a multi-line statement (e.g. class declaration) when
- pasting code into the standard Python interpreter.
-
- [1] http://docs.python.org/reference/lexical_analysis.html#blank-lines
+ r"""Trailing whitespace is superfluous.
The warning returned varies on whether the line itself is blank, for easier
filtering for those who want to indent their blank lines.
@@ -179,30 +171,24 @@ def trailing_whitespace(physical_line):
return 0, "W293 blank line contains whitespace"
-def trailing_blank_lines(physical_line, lines, line_number):
- r"""
- JCR: Trailing blank lines are superfluous.
+def trailing_blank_lines(physical_line, lines, line_number, total_lines):
+ r"""Trailing blank lines are superfluous.
Okay: spam(1)
W391: spam(1)\n
- """
- if not physical_line.rstrip() and line_number == len(lines):
- return 0, "W391 blank line at end of file"
-
-def missing_newline(physical_line):
+ However, the last line should end with a newline (warning W292).
"""
- JCR: The last line should have a newline.
-
- Reports warning W292.
- """
- if physical_line.rstrip() == physical_line:
- return len(physical_line), "W292 no newline at end of file"
+ if line_number == total_lines:
+ stripped_last_line = physical_line.rstrip()
+ if not stripped_last_line:
+ return 0, "W391 blank line at end of file"
+ if stripped_last_line == physical_line:
+ return len(physical_line), "W292 no newline at end of file"
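The merged check inspects only the final physical line, so the two warnings stay mutually exclusive; for instance:

# "spam(1)\n"   -> OK: file ends with exactly one newline
# "spam(1)"     -> W292: no newline at end of file
# "spam(1)\n\n" -> W391: blank line at end of file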
-def maximum_line_length(physical_line, max_line_length):
- """
- Limit all lines to a maximum of 79 characters.
+def maximum_line_length(physical_line, max_line_length, multiline):
+ r"""Limit all lines to a maximum of 79 characters.
There are still many devices around that are limited to 80 character
lines; plus, limiting windows to 80 characters makes it possible to have
@@ -215,8 +201,13 @@ def maximum_line_length(physical_line, max_line_length):
"""
line = physical_line.rstrip()
length = len(line)
- if length > max_line_length:
- if noqa(line):
+ if length > max_line_length and not noqa(line):
+ # Special case for long URLs in multi-line docstrings or comments,
+ # but still report the error when the first 72 chars are whitespace.
+ chunks = line.split()
+ if ((len(chunks) == 1 and multiline) or
+ (len(chunks) == 2 and chunks[0] == '#')) and \
+ len(line) - len(chunks[-1]) < max_line_length - 7:
return
if hasattr(line, 'decode'): # Python 2
# The line could contain multi-byte characters
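# Sketch of the special case above (assumption): a noqa-free physical line
# whose only chunk is a long URL inside a multiline string, or a comment
# holding a single long URL ('# http://...'), is exempted, provided the
# text before that final chunk ends within max_line_length - 7 columns;
# otherwise E501 is still reported.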
@@ -235,9 +226,8 @@ def maximum_line_length(physical_line, max_line_length):
def blank_lines(logical_line, blank_lines, indent_level, line_number,
- previous_logical, previous_indent_level):
- r"""
- Separate top-level function and class definitions with two blank lines.
+ blank_before, previous_logical, previous_indent_level):
+ r"""Separate top-level function and class definitions with two blank lines.
Method definitions inside a class are separated by a single blank line.
@@ -265,19 +255,18 @@ def blank_lines(logical_line, blank_lines, indent_level, line_number,
yield 0, "E303 too many blank lines (%d)" % blank_lines
elif logical_line.startswith(('def ', 'class ', '@')):
if indent_level:
- if not (blank_lines or previous_indent_level < indent_level or
+ if not (blank_before or previous_indent_level < indent_level or
DOCSTRING_REGEX.match(previous_logical)):
yield 0, "E301 expected 1 blank line, found 0"
- elif blank_lines != 2:
- yield 0, "E302 expected 2 blank lines, found %d" % blank_lines
+ elif blank_before != 2:
+ yield 0, "E302 expected 2 blank lines, found %d" % blank_before
def extraneous_whitespace(logical_line):
- """
- Avoid extraneous whitespace in the following situations:
+ r"""Avoid extraneous whitespace.
+ Avoid extraneous whitespace in these situations:
- Immediately inside parentheses, brackets or braces.
-
- Immediately before a comma, semicolon, or colon.
Okay: spam(ham[1], {eggs: 2})
@@ -306,8 +295,7 @@ def extraneous_whitespace(logical_line):
def whitespace_around_keywords(logical_line):
- r"""
- Avoid extraneous whitespace around keywords.
+ r"""Avoid extraneous whitespace around keywords.
Okay: True and False
E271: True and False
@@ -330,8 +318,7 @@ def whitespace_around_keywords(logical_line):
def missing_whitespace(logical_line):
- """
- JCR: Each comma, semicolon or colon should be followed by whitespace.
+ r"""Each comma, semicolon or colon should be followed by whitespace.
Okay: [a, b]
Okay: (3,)
@@ -358,8 +345,7 @@ def missing_whitespace(logical_line):
def indentation(logical_line, previous_logical, indent_char,
indent_level, previous_indent_level):
- r"""
- Use 4 spaces per indentation level.
+ r"""Use 4 spaces per indentation level.
For really old code that you don't want to mess up, you can continue to
use 8-space tabs.
@@ -383,16 +369,16 @@ def indentation(logical_line, previous_logical, indent_char,
yield 0, "E113 unexpected indentation"
-def continuation_line_indentation(logical_line, tokens, indent_level, verbose):
- r"""
- Continuation lines should align wrapped elements either vertically using
- Python's implicit line joining inside parentheses, brackets and braces, or
- using a hanging indent.
+def continued_indentation(logical_line, tokens, indent_level, hang_closing,
+ indent_char, noqa, verbose):
+ r"""Continuation lines indentation.
- When using a hanging indent the following considerations should be applied:
+ Continuation lines should align wrapped elements either vertically
+ using Python's implicit line joining inside parentheses, brackets
+ and braces, or using a hanging indent.
+ When using a hanging indent these considerations should be applied:
- there should be no arguments on the first line, and
-
- further indentation should be used to clearly distinguish itself as a
continuation line.
@@ -404,14 +390,16 @@ def continuation_line_indentation(logical_line, tokens, indent_level, verbose):
E122: a = (\n42)
E123: a = (\n 42\n )
E124: a = (24,\n 42\n)
- E125: if (a or\n b):\n pass
+ E125: if (\n b):\n pass
E126: a = (\n 42)
E127: a = (24,\n 42)
E128: a = (24,\n 42)
+ E129: if (a or\n b):\n pass
+ E131: a = (\n 42\n 24)
"""
first_row = tokens[0][2][0]
nrows = 1 + tokens[-1][2][0] - first_row
- if nrows == 1 or noqa(tokens[0][4]):
+ if noqa or nrows == 1:
return
# indent_next tells us whether the next block is indented; assuming
@@ -421,13 +409,20 @@ def continuation_line_indentation(logical_line, tokens, indent_level, verbose):
indent_next = logical_line.endswith(':')
row = depth = 0
+ valid_hangs = (4,) if indent_char != '\t' else (4, 8)
# remember how many brackets were opened on each line
parens = [0] * nrows
# relative indents of physical lines
rel_indent = [0] * nrows
+ # for each depth, collect a list of opening rows
+ open_rows = [[0]]
+ # for each depth, memorize the hanging indentation
+ hangs = [None]
# visual indents
indent_chances = {}
last_indent = tokens[0][2]
+ visual_indent = None
+ # for each depth, memorize the visual indent column
indent = [last_indent[1]]
if verbose >= 3:
print(">>> " + tokens[0][4].rstrip())
@@ -437,8 +432,7 @@ def continuation_line_indentation(logical_line, tokens, indent_level, verbose):
newline = row < start[0] - first_row
if newline:
row = start[0] - first_row
- newline = (not last_token_multiline and
- token_type not in (tokenize.NL, tokenize.NEWLINE))
+ newline = not last_token_multiline and token_type not in NEWLINE
if newline:
# this is the beginning of a continuation line.
@@ -449,51 +443,61 @@ def continuation_line_indentation(logical_line, tokens, indent_level, verbose):
# record the initial indent.
rel_indent[row] = expand_indent(line) - indent_level
- if depth:
- # a bracket expression in a continuation line.
- # find the line that it was opened on
- for open_row in range(row - 1, -1, -1):
- if parens[open_row]:
- break
- else:
- # an unbracketed continuation line (ie, backslash)
- open_row = 0
- hang = rel_indent[row] - rel_indent[open_row]
- visual_indent = indent_chances.get(start[1])
-
- if token_type == tokenize.OP and text in ']})':
- # this line starts with a closing bracket
- if indent[depth]:
- if start[1] != indent[depth]:
- yield (start, "E124 closing bracket does not match "
- "visual indentation")
- elif hang:
+ # identify closing bracket
+ close_bracket = (token_type == tokenize.OP and text in ']})')
+
+ # is the indent relative to an opening bracket line?
+ for open_row in reversed(open_rows[depth]):
+ hang = rel_indent[row] - rel_indent[open_row]
+ hanging_indent = hang in valid_hangs
+ if hanging_indent:
+ break
+ if hangs[depth]:
+ hanging_indent = (hang == hangs[depth])
+ # is there any chance of visual indent?
+ visual_indent = (not close_bracket and hang > 0 and
+ indent_chances.get(start[1]))
+
+ if close_bracket and indent[depth]:
+ # closing bracket for visual indent
+ if start[1] != indent[depth]:
+ yield (start, "E124 closing bracket does not match "
+ "visual indentation")
+ elif close_bracket and not hang:
+ # closing bracket matches indentation of opening bracket's line
+ if hang_closing:
+ yield start, "E133 closing bracket is missing indentation"
+ elif indent[depth] and start[1] < indent[depth]:
+ if visual_indent is not True:
+ # visual indent is broken
+ yield (start, "E128 continuation line "
+ "under-indented for visual indent")
+ elif hanging_indent or (indent_next and rel_indent[row] == 8):
+ # hanging indent is verified
+ if close_bracket and not hang_closing:
yield (start, "E123 closing bracket does not match "
"indentation of opening bracket's line")
+ hangs[depth] = hang
elif visual_indent is True:
# visual indent is verified
- if not indent[depth]:
- indent[depth] = start[1]
+ indent[depth] = start[1]
elif visual_indent in (text, str):
# ignore token lined up with matching one from a previous line
pass
- elif indent[depth] and start[1] < indent[depth]:
- # visual indent is broken
- yield (start, "E128 continuation line "
- "under-indented for visual indent")
- elif hang == 4 or (indent_next and rel_indent[row] == 8):
- # hanging indent is verified
- pass
else:
# indent is broken
if hang <= 0:
error = "E122", "missing indentation or outdented"
elif indent[depth]:
error = "E127", "over-indented for visual indent"
- elif hang % 4:
- error = "E121", "indentation is not a multiple of four"
+ elif not close_bracket and hangs[depth]:
+ error = "E131", "unaligned for hanging indent"
else:
- error = "E126", "over-indented for hanging indent"
+ hangs[depth] = hang
+ if hang > 4:
+ error = "E126", "over-indented for hanging indent"
+ else:
+ error = "E121", "under-indented for hanging indent"
yield start, "%s continuation line %s" % error
# look for visual indenting
@@ -510,12 +514,18 @@ def continuation_line_indentation(logical_line, tokens, indent_level, verbose):
# special case for the "if" statement because len("if (") == 4
elif not indent_chances and not row and not depth and text == 'if':
indent_chances[end[1] + 1] = True
+ elif text == ':' and line[end[1]:].isspace():
+ open_rows[depth].append(row)
# keep track of bracket depth
if token_type == tokenize.OP:
if text in '([{':
depth += 1
indent.append(0)
+ hangs.append(None)
+ if len(open_rows) == depth:
+ open_rows.append([])
+ open_rows[depth].append(row)
parens[row] += 1
if verbose >= 4:
print("bracket depth %s seen, col %s, visual min = %s" %
@@ -523,12 +533,14 @@ def continuation_line_indentation(logical_line, tokens, indent_level, verbose):
elif text in ')]}' and depth > 0:
# parent indents should not be more than this one
prev_indent = indent.pop() or last_indent[1]
+ hangs.pop()
for d in range(depth):
if indent[d] > prev_indent:
indent[d] = 0
for ind in list(indent_chances):
if ind >= prev_indent:
del indent_chances[ind]
+ del open_rows[depth + 1:]
depth -= 1
if depth:
indent_chances[indent[depth]] = True
@@ -542,21 +554,25 @@ def continuation_line_indentation(logical_line, tokens, indent_level, verbose):
indent_chances[start[1]] = text
last_token_multiline = (start[0] != end[0])
+ if last_token_multiline:
+ rel_indent[end[0] - first_row] = rel_indent[row]
- if indent_next and rel_indent[-1] == 4:
- yield (last_indent, "E125 continuation line does not distinguish "
- "itself from next logical line")
+ if indent_next and expand_indent(line) == indent_level + 4:
+ pos = (start[0], indent[0] + 4)
+ if visual_indent:
+ code = "E129 visually indented line"
+ else:
+ code = "E125 continuation line"
+ yield pos, "%s with same indent as next logical line" % code
def whitespace_before_parameters(logical_line, tokens):
- """
- Avoid extraneous whitespace in the following situations:
-
- - Immediately before the open parenthesis that starts the argument
- list of a function call.
+ r"""Avoid extraneous whitespace.
- - Immediately before the open parenthesis that starts an indexing or
- slicing.
+ Avoid extraneous whitespace in the following situations:
+ - before the open parenthesis that starts the argument list of a
+ function call.
+ - before the open parenthesis that starts an indexing or slicing.
Okay: spam(1)
E211: spam (1)
@@ -565,11 +581,9 @@ def whitespace_before_parameters(logical_line, tokens):
E211: dict ['key'] = list[index]
E211: dict['key'] = list [index]
"""
- prev_type = tokens[0][0]
- prev_text = tokens[0][1]
- prev_end = tokens[0][3]
+ prev_type, prev_text, __, prev_end, __ = tokens[0]
for index in range(1, len(tokens)):
- token_type, text, start, end, line = tokens[index]
+ token_type, text, start, end, __ = tokens[index]
if (token_type == tokenize.OP and
text in '([' and
start != prev_end and
@@ -585,11 +599,7 @@ def whitespace_before_parameters(logical_line, tokens):
def whitespace_around_operator(logical_line):
- r"""
- Avoid extraneous whitespace in the following situations:
-
- - More than one space around an assignment (or other) operator to
- align it with another.
+ r"""Avoid extraneous whitespace around an operator.
Okay: a = 12 + 3
E221: a = 4 + 5
@@ -612,13 +622,15 @@ def whitespace_around_operator(logical_line):
def missing_whitespace_around_operator(logical_line, tokens):
- r"""
+ r"""Surround operators with a single space on either side.
+
- Always surround these binary operators with a single space on
either side: assignment (=), augmented assignment (+=, -= etc.),
- comparisons (==, <, >, !=, <>, <=, >=, in, not in, is, is not),
+ comparisons (==, <, >, !=, <=, >=, in, not in, is, is not),
Booleans (and, or, not).
- - Use spaces around arithmetic operators.
+ - If operators with different priorities are used, consider adding
+ whitespace around the operators with the lowest priorities.
Okay: i = i + 1
Okay: submitted += 1
@@ -642,8 +654,7 @@ def missing_whitespace_around_operator(logical_line, tokens):
prev_type = tokenize.OP
prev_text = prev_end = None
for token_type, text, start, end, line in tokens:
- if token_type in (tokenize.NL, tokenize.NEWLINE, tokenize.ERRORTOKEN):
- # ERRORTOKEN is triggered by backticks in Python 3
+ if token_type in SKIP_COMMENTS:
continue
if text in ('(', 'lambda'):
parens += 1
@@ -683,18 +694,9 @@ def missing_whitespace_around_operator(logical_line, tokens):
# Check if the operator is being used as a binary operator
# Allow unary operators: -123, -x, +1.
# Allow argument unpacking: foo(*args, **kwargs).
- if prev_type == tokenize.OP:
- binary_usage = (prev_text in '}])')
- elif prev_type == tokenize.NAME:
- binary_usage = (prev_text not in KEYWORDS)
- else:
- binary_usage = (prev_type not in SKIP_TOKENS)
-
- if binary_usage:
- if text in WS_OPTIONAL_OPERATORS:
- need_space = None
- else:
- need_space = True
+ if (prev_text in '}])' if prev_type == tokenize.OP
+ else prev_text not in KEYWORDS):
+ need_space = None
elif text in WS_OPTIONAL_OPERATORS:
need_space = None
@@ -712,11 +714,7 @@ def missing_whitespace_around_operator(logical_line, tokens):
def whitespace_around_comma(logical_line):
- r"""
- Avoid extraneous whitespace in the following situations:
-
- - More than one space around an assignment (or other) operator to
- align it with another.
+ r"""Avoid extraneous whitespace after a comma or a colon.
Note: these checks are disabled by default
@@ -734,7 +732,8 @@ def whitespace_around_comma(logical_line):
def whitespace_around_named_parameter_equals(logical_line, tokens):
- """
+ r"""Don't use spaces around the '=' sign in function arguments.
+
Don't use spaces around the '=' sign when used to indicate a
keyword argument or a default parameter value.
@@ -753,6 +752,8 @@ def whitespace_around_named_parameter_equals(logical_line, tokens):
prev_end = None
message = "E251 unexpected spaces around keyword / parameter equals"
for token_type, text, start, end, line in tokens:
+ if token_type == tokenize.NL:
+ continue
if no_space:
no_space = False
if start != prev_end:
@@ -769,38 +770,46 @@ def whitespace_around_named_parameter_equals(logical_line, tokens):
prev_end = end
-def whitespace_before_inline_comment(logical_line, tokens):
- """
- Separate inline comments by at least two spaces.
+def whitespace_before_comment(logical_line, tokens):
+ r"""Separate inline comments by at least two spaces.
An inline comment is a comment on the same line as a statement. Inline
comments should be separated by at least two spaces from the statement.
They should start with a # and a single space.
+ Each line of a block comment starts with a # and a single space
+ (unless it is indented text inside the comment).
+
Okay: x = x + 1 # Increment x
Okay: x = x + 1 # Increment x
+ Okay: # Block comment
E261: x = x + 1 # Increment x
E262: x = x + 1 #Increment x
E262: x = x + 1 # Increment x
+ E265: #Block comment
"""
prev_end = (0, 0)
for token_type, text, start, end, line in tokens:
if token_type == tokenize.COMMENT:
- if not line[:start[1]].strip():
- continue
- if prev_end[0] == start[0] and start[1] < prev_end[1] + 2:
- yield (prev_end,
- "E261 at least two spaces before inline comment")
+ inline_comment = line[:start[1]].strip()
+ if inline_comment:
+ if prev_end[0] == start[0] and start[1] < prev_end[1] + 2:
+ yield (prev_end,
+ "E261 at least two spaces before inline comment")
symbol, sp, comment = text.partition(' ')
- if symbol not in ('#', '#:') or comment[:1].isspace():
- yield start, "E262 inline comment should start with '# '"
+ bad_prefix = symbol not in ('#', '#:')
+ if inline_comment:
+ if bad_prefix or comment[:1].isspace():
+ yield start, "E262 inline comment should start with '# '"
+ elif bad_prefix:
+ if text.rstrip('#') and (start[0] > 1 or symbol[1] != '!'):
+ yield start, "E265 block comment should start with '# '"
elif token_type != tokenize.NL:
prev_end = end
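# Sketch (assumption): E265 applies to block comments only; inline comments
# keep E261/E262, and two cases stay exempt:
#   #!/usr/bin/env python   -> Okay on line 1 (shebang)
#   ####                    -> Okay (text.rstrip('#') is empty)
#   #Block comment          -> E265 block comment should start with '# '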
def imports_on_separate_lines(logical_line):
- r"""
- Imports should usually be on separate lines.
+ r"""Imports should usually be on separate lines.
Okay: import os\nimport sys
E401: import sys, os
@@ -819,13 +828,11 @@ def imports_on_separate_lines(logical_line):
def compound_statements(logical_line):
- r"""
- Compound statements (multiple statements on the same line) are
- generally discouraged.
+ r"""Compound statements (on the same line) are generally discouraged.
While sometimes it's okay to put an if/for/while with a small body
- on the same line, never do this for multi-clause statements. Also
- avoid folding such long lines!
+ on the same line, never do this for multi-clause statements.
+ Also avoid folding such long lines!
Okay: if foo == 'blah':\n do_blah_thing()
Okay: do_one()
@@ -847,24 +854,25 @@ def compound_statements(logical_line):
line = logical_line
last_char = len(line) - 1
found = line.find(':')
- if -1 < found < last_char:
+ while -1 < found < last_char:
before = line[:found]
if (before.count('{') <= before.count('}') and # {'a': 1} (dict)
before.count('[') <= before.count(']') and # [1:2] (slice)
before.count('(') <= before.count(')') and # (Python 3 annotation)
not LAMBDA_REGEX.search(before)): # lambda x: x
yield found, "E701 multiple statements on one line (colon)"
+ found = line.find(':', found + 1)
found = line.find(';')
- if -1 < found:
+ while -1 < found:
if found < last_char:
yield found, "E702 multiple statements on one line (semicolon)"
else:
yield found, "E703 statement ends with a semicolon"
+ found = line.find(';', found + 1)
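# Sketch (assumption): turning the 'if's into 'while' loops reports every
# occurrence on the logical line instead of only the first, e.g.
#   do_one(); do_two(); do_three()
# now yields E702 at both semicolons.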
def explicit_line_join(logical_line, tokens):
- r"""
- Avoid explicit line join between brackets.
+ r"""Avoid explicit line join between brackets.
The preferred way of wrapping long lines is by using Python's implied line
continuation inside parentheses, brackets and braces. Long lines can be
@@ -897,8 +905,9 @@ def explicit_line_join(logical_line, tokens):
parens -= 1
-def comparison_to_singleton(logical_line):
- """
+def comparison_to_singleton(logical_line, noqa):
+ r"""Comparison to singletons should use "is" or "is not".
+
Comparisons to singletons like None should always be done
with "is" or "is not", never the equality operators.
@@ -911,7 +920,7 @@ def comparison_to_singleton(logical_line):
set to some other value. The other value might have a type (such as a
container) that could be false in a boolean context!
"""
- match = COMPARE_SINGLETON_REGEX.search(logical_line)
+ match = not noqa and COMPARE_SINGLETON_REGEX.search(logical_line)
if match:
same = (match.group(1) == '==')
singleton = match.group(2)
@@ -927,10 +936,31 @@ def comparison_to_singleton(logical_line):
(code, singleton, msg))
-def comparison_type(logical_line):
+def comparison_negative(logical_line):
+ r"""Negative comparison should be done using "not in" and "is not".
+
+ Okay: if x not in y:\n pass
+ Okay: assert (X in Y or X is Z)
+ Okay: if not (X in Y):\n pass
+ Okay: zz = x is not y
+ E713: Z = not X in Y
+ E713: if not X.B in Y:\n pass
+ E714: if not X is Y:\n pass
+ E714: Z = not X.B is Y
"""
- Object type comparisons should always use isinstance() instead of
- comparing types directly.
+ match = COMPARE_NEGATIVE_REGEX.search(logical_line)
+ if match:
+ pos = match.start(1)
+ if match.group(2) == 'in':
+ yield pos, "E713 test for membership should be 'not in'"
+ else:
+ yield pos, "E714 test for object identity should be 'is not'"
+
+
+def comparison_type(logical_line):
+ r"""Object type comparisons should always use isinstance().
+
+ Do not compare types directly.
Okay: if isinstance(obj, int):
E721: if type(obj) is type(1):
@@ -947,44 +977,36 @@ def comparison_type(logical_line):
inst = match.group(1)
if inst and isidentifier(inst) and inst not in SINGLETONS:
return # Allow comparison for types which are not obvious
- yield match.start(0), "E721 do not compare types, use 'isinstance()'"
+ yield match.start(), "E721 do not compare types, use 'isinstance()'"
-def python_3000_has_key(logical_line):
- r"""
- The {}.has_key() method is removed in the Python 3.
- Use the 'in' operation instead.
+def python_3000_has_key(logical_line, noqa):
+ r"""The {}.has_key() method is removed in Python 3: use the 'in' operator.
Okay: if "alph" in d:\n print d["alph"]
W601: assert d.has_key('alph')
"""
pos = logical_line.find('.has_key(')
- if pos > -1:
+ if pos > -1 and not noqa:
yield pos, "W601 .has_key() is deprecated, use 'in'"
def python_3000_raise_comma(logical_line):
- """
- When raising an exception, use "raise ValueError('message')"
- instead of the older form "raise ValueError, 'message'".
+ r"""When raising an exception, use "raise ValueError('message')".
- The paren-using form is preferred because when the exception arguments
- are long or include string formatting, you don't need to use line
- continuation characters thanks to the containing parentheses. The older
- form is removed in Python 3.
+ The older form is removed in Python 3.
Okay: raise DummyError("Message")
W602: raise DummyError, "Message"
"""
match = RAISE_COMMA_REGEX.match(logical_line)
if match and not RERAISE_COMMA_REGEX.match(logical_line):
- yield match.start(1), "W602 deprecated form of raising exception"
+ yield match.end() - 1, "W602 deprecated form of raising exception"
def python_3000_not_equal(logical_line):
- """
- != can also be written <>, but this is an obsolete usage kept for
- backwards compatibility only. New code should always use !=.
+ r"""New code should always use != instead of <>.
+
The older syntax is removed in Python 3.
Okay: if a != 'no':
@@ -996,9 +1018,7 @@ def python_3000_not_equal(logical_line):
def python_3000_backticks(logical_line):
- """
- Backticks are removed in Python 3.
- Use repr() instead.
+ r"""Backticks are removed in Python 3: use repr() instead.
Okay: val = repr(1 + 2)
W604: val = `1 + 2`
@@ -1016,49 +1036,40 @@ def python_3000_backticks(logical_line):
if '' == ''.encode():
# Python 2: implicit encoding.
def readlines(filename):
- f = open(filename)
- try:
+ """Read the source code."""
+ with open(filename, 'rU') as f:
return f.readlines()
- finally:
- f.close()
-
isidentifier = re.compile(r'[a-zA-Z_]\w*').match
stdin_get_value = sys.stdin.read
else:
# Python 3
def readlines(filename):
- f = open(filename, 'rb')
+ """Read the source code."""
try:
- coding, lines = tokenize.detect_encoding(f.readline)
- f = TextIOWrapper(f, coding, line_buffering=True)
- return [l.decode(coding) for l in lines] + f.readlines()
+ with open(filename, 'rb') as f:
+ (coding, lines) = tokenize.detect_encoding(f.readline)
+ f = TextIOWrapper(f, coding, line_buffering=True)
+ return [l.decode(coding) for l in lines] + f.readlines()
except (LookupError, SyntaxError, UnicodeError):
- f.close()
- # Fall back if files are improperly declared
- f = open(filename, encoding='latin-1')
- return f.readlines()
- finally:
- f.close()
-
+ # Fall back if file encoding is improperly declared
+ with open(filename, encoding='latin-1') as f:
+ return f.readlines()
isidentifier = str.isidentifier
def stdin_get_value():
return TextIOWrapper(sys.stdin.buffer, errors='ignore').read()
-readlines.__doc__ = " Read the source code."
noqa = re.compile(r'# no(?:qa|pep8)\b', re.I).search
def expand_indent(line):
- r"""
- Return the amount of indentation.
+ r"""Return the amount of indentation.
+
Tabs are expanded to the next multiple of 8.
>>> expand_indent(' ')
4
>>> expand_indent('\t')
8
- >>> expand_indent(' \t')
- 8
>>> expand_indent(' \t')
8
>>> expand_indent(' \t')
@@ -1078,8 +1089,7 @@ def expand_indent(line):
def mute_string(text):
- """
- Replace contents with 'xxx' to prevent syntax matching.
+ """Replace contents with 'xxx' to prevent syntax matching.
>>> mute_string('"abc"')
'"xxx"'
@@ -1111,7 +1121,7 @@ def parse_udiff(diff, patterns=None, parent='.'):
continue
if line[:3] == '@@ ':
hunk_match = HUNK_REGEX.match(line)
- row, nrows = [int(g or '1') for g in hunk_match.groups()]
+ (row, nrows) = [int(g or '1') for g in hunk_match.groups()]
rv[path].update(range(row, row + nrows))
elif line[:3] == '+++':
path = line[4:].split('\t', 1)[0]
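# Sketch (assumption about HUNK_REGEX's groups): for a hunk header
# '@@ -5,3 +12,4 @@' the '+' side is captured, so (row, nrows) == (12, 4)
# and physical rows 12..15 are recorded as changed for that path.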
@@ -1123,9 +1133,24 @@ def parse_udiff(diff, patterns=None, parent='.'):
if rows and filename_match(path, patterns)])
-def filename_match(filename, patterns, default=True):
+def normalize_paths(value, parent=os.curdir):
+ """Parse a comma-separated list of paths.
+
+ Return a list of absolute paths.
"""
- Check if patterns contains a pattern that matches filename.
+ if not value or isinstance(value, list):
+ return value
+ paths = []
+ for path in value.split(','):
+ if '/' in path:
+ path = os.path.abspath(os.path.join(parent, path))
+ paths.append(path.rstrip('/'))
+ return paths
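+# Usage sketch (hypothetical values):
+#   normalize_paths('build,dist,./docs/_build', parent='/repo')
+#   -> ['build', 'dist', '/repo/docs/_build']
+# Names without a slash stay relative for basename matching; anything with
+# a slash is made absolute against 'parent' and stripped of a trailing '/'.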
+
+
+def filename_match(filename, patterns, default=True):
+ """Check if patterns contains a pattern that matches filename.
+
If patterns is unspecified, this always returns True.
"""
if not patterns:
@@ -1133,6 +1158,15 @@ def filename_match(filename, patterns, default=True):
return any(fnmatch(filename, pattern) for pattern in patterns)
+if COMMENT_WITH_NL:
+ def _is_eol_token(token):
+ return (token[0] in NEWLINE or
+ (token[0] == tokenize.COMMENT and token[1] == token[4]))
+else:
+ def _is_eol_token(token):
+ return token[0] in NEWLINE
+
+
##############################################################################
# Framework to run all checks
##############################################################################
@@ -1142,9 +1176,7 @@ _checks = {'physical_line': {}, 'logical_line': {}, 'tree': {}}
def register_check(check, codes=None):
- """
- Register a new check object.
- """
+ """Register a new check object."""
def _add_check(check, kind, codes, args):
if check in _checks[kind]:
_checks[kind][check][0].extend(codes or [])
@@ -1162,9 +1194,9 @@ def register_check(check, codes=None):
def init_checks_registry():
- """
- Register all globally visible functions where the first argument name
- is 'physical_line' or 'logical_line'.
+ """Register all globally visible functions.
+
+ The first argument name is either 'physical_line' or 'logical_line'.
"""
mod = inspect.getmodule(register_check)
for (name, function) in inspect.getmembers(mod, inspect.isfunction):
@@ -1173,9 +1205,7 @@ init_checks_registry()
class Checker(object):
- """
- Load a Python source file, tokenize it, check coding style.
- """
+ """Load a Python source file, tokenize it, check coding style."""
def __init__(self, filename=None, lines=None,
options=None, report=None, **kwargs):
@@ -1188,6 +1218,8 @@ class Checker(object):
self._logical_checks = options.logical_checks
self._ast_checks = options.ast_checks
self.max_line_length = options.max_line_length
+ self.multiline = False # in a multiline string?
+ self.hang_closing = options.hang_closing
self.verbose = options.verbose
self.filename = filename
if filename is None:
@@ -1200,161 +1232,190 @@ class Checker(object):
try:
self.lines = readlines(filename)
except IOError:
- exc_type, exc = sys.exc_info()[:2]
+ (exc_type, exc) = sys.exc_info()[:2]
self._io_error = '%s: %s' % (exc_type.__name__, exc)
self.lines = []
else:
self.lines = lines
+ if self.lines:
+ ord0 = ord(self.lines[0][0])
+ if ord0 in (0xef, 0xfeff): # Strip the UTF-8 BOM
+ if ord0 == 0xfeff:
+ self.lines[0] = self.lines[0][1:]
+ elif self.lines[0][:3] == '\xef\xbb\xbf':
+ self.lines[0] = self.lines[0][3:]
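+ # e.g. (assumption): u'\ufeff# coding: utf-8\n' (Python 3) and the
+ # raw bytes '\xef\xbb\xbf# coding: utf-8\n' (Python 2) both become
+ # '# coding: utf-8\n', keeping line-1 column offsets accurate.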
self.report = report or options.report
self.report_error = self.report.error
def report_invalid_syntax(self):
- exc_type, exc = sys.exc_info()[:2]
- offset = exc.args[1]
- if len(offset) > 2:
- offset = offset[1:3]
+ """Check if the syntax is valid."""
+ (exc_type, exc) = sys.exc_info()[:2]
+ if len(exc.args) > 1:
+ offset = exc.args[1]
+ if len(offset) > 2:
+ offset = offset[1:3]
+ else:
+ offset = (1, 0)
self.report_error(offset[0], offset[1] or 0,
'E901 %s: %s' % (exc_type.__name__, exc.args[0]),
self.report_invalid_syntax)
- report_invalid_syntax.__doc__ = " Check if the syntax is valid."
def readline(self):
- """
- Get the next line from the input buffer.
- """
- self.line_number += 1
- if self.line_number > len(self.lines):
+ """Get the next line from the input buffer."""
+ if self.line_number >= self.total_lines:
return ''
- return self.lines[self.line_number - 1]
-
- def readline_check_physical(self):
- """
- Check and return the next physical line. This method can be
- used to feed tokenize.generate_tokens.
- """
- line = self.readline()
- if line:
- self.check_physical(line)
+ line = self.lines[self.line_number]
+ self.line_number += 1
+ if self.indent_char is None and line[:1] in WHITESPACE:
+ self.indent_char = line[0]
return line
def run_check(self, check, argument_names):
- """
- Run a check plugin.
- """
+ """Run a check plugin."""
arguments = []
for name in argument_names:
arguments.append(getattr(self, name))
return check(*arguments)
def check_physical(self, line):
- """
- Run all physical checks on a raw input line.
- """
+ """Run all physical checks on a raw input line."""
self.physical_line = line
- if self.indent_char is None and line[:1] in WHITESPACE:
- self.indent_char = line[0]
for name, check, argument_names in self._physical_checks:
result = self.run_check(check, argument_names)
if result is not None:
- offset, text = result
+ (offset, text) = result
self.report_error(self.line_number, offset, text, check)
+ if text[:4] == 'E101':
+ self.indent_char = line[0]
def build_tokens_line(self):
- """
- Build a logical line from tokens.
- """
- self.mapping = []
+ """Build a logical line from tokens."""
logical = []
+ comments = []
length = 0
- previous = None
- for token in self.tokens:
- token_type, text = token[0:2]
+ prev_row = prev_col = mapping = None
+ for token_type, text, start, end, line in self.tokens:
if token_type in SKIP_TOKENS:
continue
+ if not mapping:
+ mapping = [(0, start)]
+ if token_type == tokenize.COMMENT:
+ comments.append(text)
+ continue
if token_type == tokenize.STRING:
text = mute_string(text)
- if previous:
- end_row, end = previous[3]
- start_row, start = token[2]
- if end_row != start_row: # different row
- prev_text = self.lines[end_row - 1][end - 1]
+ if prev_row:
+ (start_row, start_col) = start
+ if prev_row != start_row: # different row
+ prev_text = self.lines[prev_row - 1][prev_col - 1]
if prev_text == ',' or (prev_text not in '{[('
and text not in '}])'):
- logical.append(' ')
- length += 1
- elif end != start: # different column
- fill = self.lines[end_row - 1][end:start]
- logical.append(fill)
- length += len(fill)
- self.mapping.append((length, token))
+ text = ' ' + text
+ elif prev_col != start_col: # different column
+ text = line[prev_col:start_col] + text
logical.append(text)
length += len(text)
- previous = token
+ mapping.append((length, end))
+ (prev_row, prev_col) = end
self.logical_line = ''.join(logical)
- # With Python 2, if the line ends with '\r\r\n' the assertion fails
- # assert self.logical_line.strip() == self.logical_line
+ self.noqa = comments and noqa(''.join(comments))
+ return mapping
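+ # Illustrative sketch (assumption): for the source
+ #   x = ('a',
+ #        'b')
+ # logical_line becomes "x = ('x', 'x')" (strings muted), and each
+ # mapping entry pairs an offset into the logical line with the
+ # physical (row, col) it came from, so check_logical() can map
+ # reported offsets back to real positions.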
def check_logical(self):
- """
- Build a line from tokens and run all logical checks on it.
- """
- self.build_tokens_line()
+ """Build a line from tokens and run all logical checks on it."""
self.report.increment_logical_line()
- first_line = self.lines[self.mapping[0][1][2][0] - 1]
- indent = first_line[:self.mapping[0][1][2][1]]
- self.previous_indent_level = self.indent_level
- self.indent_level = expand_indent(indent)
+ mapping = self.build_tokens_line()
+ (start_row, start_col) = mapping[0][1]
+ start_line = self.lines[start_row - 1]
+ self.indent_level = expand_indent(start_line[:start_col])
+ if self.blank_before < self.blank_lines:
+ self.blank_before = self.blank_lines
if self.verbose >= 2:
print(self.logical_line[:80].rstrip())
for name, check, argument_names in self._logical_checks:
if self.verbose >= 4:
print(' ' + name)
- for result in self.run_check(check, argument_names):
- offset, text = result
- if isinstance(offset, tuple):
- orig_number, orig_offset = offset
- else:
- for token_offset, token in self.mapping:
- if offset >= token_offset:
- orig_number = token[2][0]
- orig_offset = (token[2][1] + offset - token_offset)
- self.report_error(orig_number, orig_offset, text, check)
- self.previous_logical = self.logical_line
+ for offset, text in self.run_check(check, argument_names) or ():
+ if not isinstance(offset, tuple):
+ for token_offset, pos in mapping:
+ if offset <= token_offset:
+ break
+ offset = (pos[0], pos[1] + offset - token_offset)
+ self.report_error(offset[0], offset[1], text, check)
+ if self.logical_line:
+ self.previous_indent_level = self.indent_level
+ self.previous_logical = self.logical_line
+ self.blank_lines = 0
+ self.tokens = []
def check_ast(self):
+ """Build the file's AST and run all AST checks."""
try:
tree = compile(''.join(self.lines), '', 'exec', PyCF_ONLY_AST)
- except SyntaxError:
+ except (SyntaxError, TypeError):
return self.report_invalid_syntax()
- for name, cls, _ in self._ast_checks:
+ for name, cls, __ in self._ast_checks:
checker = cls(tree, self.filename)
for lineno, offset, text, check in checker.run():
- if not noqa(self.lines[lineno - 1]):
+ if not self.lines or not noqa(self.lines[lineno - 1]):
self.report_error(lineno, offset, text, check)
def generate_tokens(self):
+ """Tokenize the file, run physical line checks and yield tokens."""
if self._io_error:
self.report_error(1, 0, 'E902 %s' % self._io_error, readlines)
- tokengen = tokenize.generate_tokens(self.readline_check_physical)
+ tokengen = tokenize.generate_tokens(self.readline)
try:
for token in tokengen:
+ if token[2][0] > self.total_lines:
+ return
+ self.maybe_check_physical(token)
yield token
except (SyntaxError, tokenize.TokenError):
self.report_invalid_syntax()
+ def maybe_check_physical(self, token):
+ """If appropriate (based on token), check current physical line(s)."""
+ # Called after every token, but act only on end of line.
+ if _is_eol_token(token):
+ # Obviously, a newline token ends a single physical line.
+ self.check_physical(token[4])
+ elif token[0] == tokenize.STRING and '\n' in token[1]:
+ # Less obviously, a string that contains newlines is a
+ # multiline string, either triple-quoted or with internal
+ # newlines backslash-escaped. Check every physical line in the
+ # string *except* for the last one: its newline is outside of
+ # the multiline string, so we consider it a regular physical
+ # line, and will check it like any other physical line.
+ #
+ # Subtleties:
+ # - we don't *completely* ignore the last line; if it contains
+ # the magical "# noqa" comment, we disable all physical
+ # checks for the entire multiline string
+ # - have to wind self.line_number back because initially it
+ # points to the last line of the string, and we want
+ # check_physical() to give accurate feedback
+ if noqa(token[4]):
+ return
+ self.multiline = True
+ self.line_number = token[2][0]
+ for line in token[1].split('\n')[:-1]:
+ self.check_physical(line + '\n')
+ self.line_number += 1
+ self.multiline = False
+
def check_all(self, expected=None, line_offset=0):
- """
- Run all checks on the input file.
- """
+ """Run all checks on the input file."""
self.report.init_file(self.filename, self.lines, expected, line_offset)
+ self.total_lines = len(self.lines)
if self._ast_checks:
self.check_ast()
self.line_number = 0
self.indent_char = None
- self.indent_level = 0
+ self.indent_level = self.previous_indent_level = 0
self.previous_logical = ''
self.tokens = []
- self.blank_lines = blank_lines_before_comment = 0
+ self.blank_lines = self.blank_before = 0
parens = 0
for token in self.generate_tokens():
self.tokens.append(token)
@@ -1372,29 +1433,33 @@ class Checker(object):
elif text in '}])':
parens -= 1
elif not parens:
- if token_type == tokenize.NEWLINE:
- if self.blank_lines < blank_lines_before_comment:
- self.blank_lines = blank_lines_before_comment
- self.check_logical()
- self.tokens = []
- self.blank_lines = blank_lines_before_comment = 0
- elif token_type == tokenize.NL:
- if len(self.tokens) == 1:
+ if token_type in NEWLINE:
+ if token_type == tokenize.NEWLINE:
+ self.check_logical()
+ self.blank_before = 0
+ elif len(self.tokens) == 1:
# The physical line contains only this token.
self.blank_lines += 1
- self.tokens = []
- elif token_type == tokenize.COMMENT and len(self.tokens) == 1:
- if blank_lines_before_comment < self.blank_lines:
- blank_lines_before_comment = self.blank_lines
- self.blank_lines = 0
- if COMMENT_WITH_NL:
+ del self.tokens[0]
+ else:
+ self.check_logical()
+ elif COMMENT_WITH_NL and token_type == tokenize.COMMENT:
+ if len(self.tokens) == 1:
# The comment also ends a physical line
- self.tokens = []
+ token = list(token)
+ token[1] = text.rstrip('\r\n')
+ token[3] = (token[2][0], token[2][1] + len(token[1]))
+ self.tokens = [tuple(token)]
+ self.check_logical()
+ if self.tokens:
+ self.check_physical(self.lines[-1])
+ self.check_logical()
return self.report.get_file_results()
class BaseReport(object):
"""Collect the results of the checks."""
+
print_filename = False
def __init__(self, options):
@@ -1457,8 +1522,7 @@ class BaseReport(object):
for key in self.messages if key.startswith(prefix)])
def get_statistics(self, prefix=''):
- """
- Get statistics for message codes that start with the prefix.
+ """Get statistics for message codes that start with the prefix.
prefix='' matches all errors and warnings
prefix='E' matches all errors
@@ -1529,9 +1593,9 @@ class StandardReport(BaseReport):
else:
line = self.lines[line_number - 1]
print(line.rstrip())
- print(' ' * offset + '^')
+ print(re.sub(r'\S', ' ', line[:offset]) + '^')
if self._show_pep8 and doc:
- print(doc.lstrip('\n').rstrip())
+ print(' ' + doc.strip())
return self.file_errors
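# Sketch (assumption): substituting spaces for non-whitespace keeps any
# tabs in line[:offset], so the '^' marker stays aligned with the offending
# column even on tab-indented source, unlike the old ' ' * offset.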
@@ -1557,11 +1621,12 @@ class StyleGuide(object):
parse_argv = kwargs.pop('parse_argv', False)
config_file = kwargs.pop('config_file', None)
parser = kwargs.pop('parser', None)
+ # build options from dict
+ options_dict = dict(*args, **kwargs)
+ arglist = None if parse_argv else options_dict.get('paths', None)
options, self.paths = process_options(
- parse_argv=parse_argv, config_file=config_file, parser=parser)
- if args or kwargs:
- # build options from dict
- options_dict = dict(*args, **kwargs)
+ arglist, parse_argv, config_file, parser)
+ if options_dict:
options.__dict__.update(options_dict)
if 'paths' in options_dict:
self.paths = options_dict['paths']
@@ -1572,11 +1637,14 @@ class StyleGuide(object):
if not options.reporter:
options.reporter = BaseReport if options.quiet else StandardReport
- for index, value in enumerate(options.exclude):
- options.exclude[index] = value.rstrip('/')
- # Ignore all checks which are not explicitly selected
options.select = tuple(options.select or ())
- options.ignore = tuple(options.ignore or options.select and ('',))
+ if not (options.select or options.ignore or
+ options.testsuite or options.doctest) and DEFAULT_IGNORE:
+ # The default choice: ignore controversial checks
+ options.ignore = tuple(DEFAULT_IGNORE.split(','))
+ else:
+ # Ignore all checks which are not explicitly selected
+ options.ignore = ('',) if options.select else tuple(options.ignore)
options.benchmark_keys = BENCHMARK_KEYS[:]
options.ignore_code = self.ignore_code
options.physical_checks = self.get_checks('physical_line')
@@ -1629,37 +1697,45 @@ class StyleGuide(object):
print('directory ' + root)
counters['directories'] += 1
for subdir in sorted(dirs):
- if self.excluded(os.path.join(root, subdir)):
+ if self.excluded(subdir, root):
dirs.remove(subdir)
for filename in sorted(files):
# contain a pattern that matches?
if ((filename_match(filename, filepatterns) and
- not self.excluded(filename))):
+ not self.excluded(filename, root))):
runner(os.path.join(root, filename))
- def excluded(self, filename):
- """
- Check if options.exclude contains a pattern that matches filename.
+ def excluded(self, filename, parent=None):
+ """Check if the file should be excluded.
+
+ Check if 'options.exclude' contains a pattern that matches filename.
"""
+ if not self.options.exclude:
+ return False
basename = os.path.basename(filename)
- return any((filename_match(filename, self.options.exclude,
- default=False),
- filename_match(basename, self.options.exclude,
- default=False)))
+ if filename_match(basename, self.options.exclude):
+ return True
+ if parent:
+ filename = os.path.join(parent, filename)
+ filename = os.path.abspath(filename)
+ return filename_match(filename, self.options.exclude)
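+ # Usage sketch (hypothetical options):
+ #   options.exclude = ['build', '/repo/docs']
+ #   excluded('build', '/repo')   -> True  (basename match)
+ #   excluded('docs', '/repo')    -> True  ('/repo/docs' matches)
+ #   excluded('pep8.py', '/repo') -> False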
def ignore_code(self, code):
- """
- Check if the error code should be ignored.
+ """Check if the error code should be ignored.
If 'options.select' contains a prefix of the error code,
return False. Else, if 'options.ignore' contains a prefix of
the error code, return True.
"""
+ if len(code) < 4 and any(s.startswith(code)
+ for s in self.options.select):
+ return False
return (code.startswith(self.options.ignore) and
not code.startswith(self.options.select))
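# Usage sketch (hypothetical options): with select=('E501',), ignore=('',):
#   ignore_code('E5')   -> False  (short prefix of a selected code)
#   ignore_code('E501') -> False  (explicitly selected)
#   ignore_code('W191') -> True   (everything else is ignored)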
def get_checks(self, argument_name):
- """
+ """Get all the checks for this category.
+
Find all globally visible functions where the first argument name
starts with argument_name and which contain selected tests.
"""
@@ -1675,8 +1751,9 @@ def get_parser(prog='pep8', version=__version__):
parser = OptionParser(prog=prog, version=version,
usage="%prog [options] input ...")
parser.config_options = [
- 'exclude', 'filename', 'select', 'ignore', 'max-line-length', 'count',
- 'format', 'quiet', 'show-pep8', 'show-source', 'statistics', 'verbose']
+ 'exclude', 'filename', 'select', 'ignore', 'max-line-length',
+ 'hang-closing', 'count', 'format', 'quiet', 'show-pep8',
+ 'show-source', 'statistics', 'verbose']
parser.add_option('-v', '--verbose', default=0, action='count',
help="print status messages, or debug with -vv")
parser.add_option('-q', '--quiet', default=0, action='count',
@@ -1711,6 +1788,9 @@ def get_parser(prog='pep8', version=__version__):
default=MAX_LINE_LENGTH,
help="set maximum allowed line length "
"(default: %default)")
+ parser.add_option('--hang-closing', action='store_true',
+ help="hang closing bracket instead of matching "
+ "indentation of opening bracket's line")
parser.add_option('--format', metavar='format', default='default',
help="set the error format [default|pylint|<custom>]")
parser.add_option('--diff', action='store_true',
@@ -1737,19 +1817,15 @@ def read_config(options, args, arglist, parser):
print('user configuration: %s' % user_conf)
config.read(user_conf)
+ local_dir = os.curdir
parent = tail = args and os.path.abspath(os.path.commonprefix(args))
while tail:
- for name in PROJECT_CONFIG:
- local_conf = os.path.join(parent, name)
- if os.path.isfile(local_conf):
- break
- else:
- parent, tail = os.path.split(parent)
- continue
- if options.verbose:
- print('local configuration: %s' % local_conf)
- config.read(local_conf)
- break
+ if config.read([os.path.join(parent, fn) for fn in PROJECT_CONFIG]):
+ local_dir = parent
+ if options.verbose:
+ print('local configuration: in %s' % parent)
+ break
+ (parent, tail) = os.path.split(parent)
pep8_section = parser.prog
if config.has_section(pep8_section):
@@ -1757,29 +1833,30 @@ def read_config(options, args, arglist, parser):
for o in parser.option_list])
# First, read the default values
- new_options, _ = parser.parse_args([])
+ (new_options, __) = parser.parse_args([])
# Second, parse the configuration
for opt in config.options(pep8_section):
+ if opt.replace('_', '-') not in parser.config_options:
+ print(" unknown option '%s' ignored" % opt)
+ continue
if options.verbose > 1:
print(" %s = %s" % (opt, config.get(pep8_section, opt)))
- if opt.replace('_', '-') not in parser.config_options:
- print("Unknown option: '%s'\n not in [%s]" %
- (opt, ' '.join(parser.config_options)))
- sys.exit(1)
normalized_opt = opt.replace('-', '_')
opt_type = option_list[normalized_opt]
if opt_type in ('int', 'count'):
value = config.getint(pep8_section, opt)
elif opt_type == 'string':
value = config.get(pep8_section, opt)
+ if normalized_opt == 'exclude':
+ value = normalize_paths(value, local_dir)
else:
assert opt_type in ('store_true', 'store_false')
value = config.getboolean(pep8_section, opt)
setattr(new_options, normalized_opt, value)
# Third, overwrite with the command-line options
- options, _ = parser.parse_args(arglist, values=new_options)
+ (options, __) = parser.parse_args(arglist, values=new_options)
options.doctest = options.testsuite = False
return options
@@ -1787,9 +1864,6 @@ def read_config(options, args, arglist, parser):
def process_options(arglist=None, parse_argv=False, config_file=None,
parser=None):
"""Process options passed either via arglist or via command line args."""
- if not arglist and not parse_argv:
- # Don't read the command line if the module is used as a library.
- arglist = []
if not parser:
parser = get_parser()
if not parser.has_option('--config'):
@@ -1802,7 +1876,12 @@ def process_options(arglist=None, parse_argv=False, config_file=None,
(parser.prog, ', '.join(parser.config_options))))
group.add_option('--config', metavar='path', default=config_file,
help="user config file location (default: %default)")
- options, args = parser.parse_args(arglist)
+ # Don't read the command line if the module is used as a library.
+ if not arglist and not parse_argv:
+ arglist = []
+ # If parse_argv is True and arglist is None, arguments are
+ # parsed from the command line (sys.argv)
+ (options, args) = parser.parse_args(arglist)
options.reporter = None
if options.ensure_value('testsuite', False):
@@ -1817,18 +1896,10 @@ def process_options(arglist=None, parse_argv=False, config_file=None,
options = read_config(options, args, arglist, parser)
options.reporter = parse_argv and options.quiet == 1 and FileReport
- if options.filename:
- options.filename = options.filename.split(',')
- options.exclude = options.exclude.split(',')
- if options.select:
- options.select = options.select.split(',')
- if options.ignore:
- options.ignore = options.ignore.split(',')
- elif not (options.select or
- options.testsuite or options.doctest) and DEFAULT_IGNORE:
- # The default choice: ignore controversial checks
- # (for doctest and testsuite, all checks are required)
- options.ignore = DEFAULT_IGNORE.split(',')
+ options.filename = options.filename and options.filename.split(',')
+ options.exclude = normalize_paths(options.exclude)
+ options.select = options.select and options.select.split(',')
+ options.ignore = options.ignore and options.ignore.split(',')
if options.diff:
options.reporter = DiffReport
@@ -1841,13 +1912,19 @@ def process_options(arglist=None, parse_argv=False, config_file=None,
def _main():
"""Parse options and run checks on Python source."""
+ import signal
+
+ # Handle "Broken pipe" gracefully
+ try:
+ signal.signal(signal.SIGPIPE, lambda signum, frame: sys.exit(1))
+ except AttributeError:
+ pass # not supported on Windows
+
pep8style = StyleGuide(parse_argv=True, config_file=True)
options = pep8style.options
if options.doctest or options.testsuite:
- sys.path[:0] = [TESTSUITE_PATH]
- from test_pep8 import run_tests
- del sys.path[0]
- report = run_tests(pep8style, options.doctest, options.testsuite)
+ from testsuite.support import run_tests
+ report = run_tests(pep8style)
else:
report = pep8style.check_files()
if options.statistics:
diff --git a/python/helpers/pycharm/lettuce_runner.py b/python/helpers/pycharm/lettuce_runner.py
new file mode 100644
index 000000000000..6aaa566df719
--- /dev/null
+++ b/python/helpers/pycharm/lettuce_runner.py
@@ -0,0 +1,132 @@
+# coding=utf-8
+"""
+BDD lettuce framework runner
+"""
+__author__ = 'Ilya.Kazakevich'
+import os
+from lettuce.exceptions import ReasonToFail
+import time
+import sys
+import tcmessages
+import lettuce
+from lettuce import core
+
+
+# Error message about unsupported outlines
+_NO_OUTLINE_ERROR = "Outline scenarios are not supported due to https://github.com/gabrielfalcao/lettuce/issues/451"
+
+
+class LettuceRunner(object):
+ """
+ TODO: Runs lettuce
+ """
+
+ def __init__(self, base_dir):
+ """
+ :param base_dir base directory to run tests in
+ :type base_dir: str
+
+ """
+ self.base_dir = base_dir
+ self.runner = lettuce.Runner(base_dir)
+ self.messages = tcmessages.TeamcityServiceMessages()
+ self.test_start_time = None
+
+ def report_tests(self):
+ """
+ :returns : number of tests
+ :rtype : int
+ """
+ result = 0
+ for feature_file in self.runner.loader.find_feature_files():
+ feature = core.Feature.from_file(feature_file)
+ for scenario in feature.scenarios:
+ assert isinstance(scenario, core.Scenario), scenario
+ if not scenario.outlines:
+ result += len(scenario.steps)
+ self.messages.testCount(result)
+
+ def report_scenario_started(self, scenario):
+ """
+ Reports scenario launched
+ :type scenario core.Scenario
+ :param scenario: scenario
+ """
+ if scenario.outlines:
+ self.messages.testIgnored(scenario.name,
+ _NO_OUTLINE_ERROR)
+ scenario.steps = [] # Clear to prevent running. TODO: Fix when this issue fixed
+ scenario.background = None # TODO: undocumented
+ return
+ self.report_suite(True, scenario.name, scenario.described_at)
+
+ def report_suite(self, is_start, name, described_at):
+ """
+ Reports some suite (scenario, feature, background etc) is started or stopped
+ :param is_start: started or not
+ :param name: suite name
+ :param described_at: where it is described (file, line)
+ :return:
+ """
+ if is_start:
+ self.messages.testSuiteStarted(name, self._gen_location(described_at))
+ else:
+ self.messages.testSuiteFinished(name)
+
+ def report_step(self, is_start, step):
+ """
+ Reports step start / stop
+ :param is_start: true if step started
+ :type step core.Step
+ :param step: step
+ """
+ test_name = step.sentence
+ if is_start:
+ self.test_start_time = time.time()
+ self.messages.testStarted(test_name, self._gen_location(step.described_at))
+ elif step.passed:
+ duration = 0
+ if self.test_start_time:
+ duration = long(time.time() - self.test_start_time)
+ self.messages.testFinished(test_name, duration=duration)
+ self.test_start_time = None
+ elif step.failed:
+ reason = step.why
+ assert isinstance(reason, ReasonToFail), reason
+ self.messages.testFailed(test_name, message=reason.exception, details=reason.traceback)
+
+ def _gen_location(self, description):
+ """
+ :param description: "described_at" (file, line)
+ :return: location in format file:line by "described_at"
+ """
+ return "file:///{}/{}:{}".format(self.base_dir, description.file, description.line)
+
+ def run(self):
+ """
+ Launches runner
+ """
+ self.report_tests()
+ self.messages.testMatrixEntered()
+
+ lettuce.before.each_feature(lambda f: self.report_suite(True, f.name, f.described_at))
+ lettuce.after.each_feature(lambda f: self.report_suite(False, f.name, f.described_at))
+
+ lettuce.before.each_scenario(lambda s: self.report_scenario_started(s))
+ lettuce.after.each_scenario(lambda s: self.report_suite(False, s.name, s.described_at))
+
+ lettuce.before.each_background(
+ lambda b, *args: self.report_suite(True, "Scenario background", b.feature.described_at))
+ lettuce.after.each_background(
+ lambda b, *args: self.report_suite(False, "Scenario background", b.feature.described_at))
+
+ lettuce.before.each_step(lambda s: self.report_step(True, s))
+ lettuce.after.each_step(lambda s: self.report_step(False, s))
+
+ self.runner.run()
+
+
+if __name__ == "__main__":
+ path = sys.argv[1] if len(sys.argv) > 1 else "."
+ assert os.path.exists(path), "{} does not exist".format(path)
+ LettuceRunner(path).run() \ No newline at end of file
diff --git a/python/helpers/pycharm_generator_utils/module_redeclarator.py b/python/helpers/pycharm_generator_utils/module_redeclarator.py
index 3af1961a0e09..0dbdbb58ac6b 100644
--- a/python/helpers/pycharm_generator_utils/module_redeclarator.py
+++ b/python/helpers/pycharm_generator_utils/module_redeclarator.py
@@ -1007,6 +1007,8 @@ class ModuleRedeclarator(object):
if self.doing_builtins and p_name == BUILTIN_MOD_NAME:
txt = create_generator()
self.classes_buf.out(0, txt)
+ txt = create_function()
+ self.classes_buf.out(0, txt)
# Fake <type 'namedtuple'>
if version[0] >= 3 or (version[0] == 2 and version[1] >= 6):
diff --git a/python/helpers/pycharm_generator_utils/util_methods.py b/python/helpers/pycharm_generator_utils/util_methods.py
index d679e3385264..b6805c4a5942 100644
--- a/python/helpers/pycharm_generator_utils/util_methods.py
+++ b/python/helpers/pycharm_generator_utils/util_methods.py
@@ -75,6 +75,47 @@ class __generator(object):
"""
return txt
+def create_function():
+ txt = """
+class __function(object):
+ '''A mock class representing function type.'''
+
+ def __init__(self):
+ self.__name__ = ''
+ self.__doc__ = ''
+ self.__dict__ = ''
+ self.__module__ = ''
+"""
+ if version[0] == 2:
+ txt += """
+ self.func_defaults = {}
+ self.func_globals = {}
+ self.func_closure = None
+ self.func_code = None
+ self.func_name = ''
+ self.func_doc = ''
+ self.func_dict = ''
+"""
+ if version[0] >= 3 or (version[0] == 2 and version[1] >= 6):
+ txt += """
+ self.__defaults__ = {}
+ self.__globals__ = {}
+ self.__closure__ = None
+ self.__code__ = None
+ self.__name__ = ''
+"""
+ if version[0] >= 3:
+ txt += """
+ self.__annotations__ = {}
+ self.__kwdefaults__ = {}
+"""
+ if version[0] >= 3 and version[1] >= 3:
+ txt += """
+ self.__qualname__ = ''
+"""
+ return txt
+
+
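+# Usage sketch: mirrors create_generator() above -- the generated source is
+# written into the builtins skeleton by module_redeclarator:
+#   txt = create_function()
+#   self.classes_buf.out(0, txt)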
def _searchbases(cls, accum):
# logic copied from inspect.py
if cls not in accum:
diff --git a/python/helpers/pydev/pydevd_comm.py b/python/helpers/pydev/pydevd_comm.py
index 9d4f2e78b94d..b4cf585e5cc2 100644
--- a/python/helpers/pydev/pydevd_comm.py
+++ b/python/helpers/pydev/pydevd_comm.py
@@ -427,7 +427,7 @@ def StartClient(host, port):
s = socket(AF_INET, SOCK_STREAM)
- MAX_TRIES = 20
+ MAX_TRIES = 100
i = 0
while i<MAX_TRIES:
try:
diff --git a/python/helpers/pydev/pydevd_signature.py b/python/helpers/pydev/pydevd_signature.py
index 2f9a1820fde4..e11bb5dd446b 100644
--- a/python/helpers/pydev/pydevd_signature.py
+++ b/python/helpers/pydev/pydevd_signature.py
@@ -47,7 +47,7 @@ class SignatureFactory(object):
name = code.co_varnames[i]
tp = type(locals[name])
class_name = tp.__name__
- if class_name == 'instance':
+ if class_name == 'instance': # old-style classes
tp = locals[name].__class__
class_name = tp.__name__