diff --git a/autoload/SpaceVim/layers/lang/python.vim b/autoload/SpaceVim/layers/lang/python.vim index 445e35c68..2ecdba962 100644 --- a/autoload/SpaceVim/layers/lang/python.vim +++ b/autoload/SpaceVim/layers/lang/python.vim @@ -106,7 +106,7 @@ function! SpaceVim#layers#lang#python#plugins() abort " but we need to disable the completions of jedi-vim. let g:jedi#completions_enabled = 0 endif - call add(plugins, ['davidhalter/jedi-vim', { 'on_ft' : 'python', + call add(plugins, [g:_spacevim_root_dir . 'bundle/jedi-vim', { 'on_ft' : 'python', \ 'if' : has('python') || has('python3')}]) endif call add(plugins, ['heavenshell/vim-pydocstring', diff --git a/bundle/jedi-vim/.github/FUNDING.yml b/bundle/jedi-vim/.github/FUNDING.yml new file mode 100644 index 000000000..418e47a80 --- /dev/null +++ b/bundle/jedi-vim/.github/FUNDING.yml @@ -0,0 +1 @@ +github: [davidhalter] diff --git a/bundle/jedi-vim/.github/ISSUE_TEMPLATE.md b/bundle/jedi-vim/.github/ISSUE_TEMPLATE.md new file mode 100644 index 000000000..582f51f5a --- /dev/null +++ b/bundle/jedi-vim/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,44 @@ +### Issue + + + +### Steps to reproduce + + + +### Output of “:verbose JediDebugInfo” + + diff --git a/bundle/jedi-vim/.github/workflows/ci.yml b/bundle/jedi-vim/.github/workflows/ci.yml new file mode 100644 index 000000000..235f429b4 --- /dev/null +++ b/bundle/jedi-vim/.github/workflows/ci.yml @@ -0,0 +1,63 @@ +name: ci +on: [push, pull_request] + +jobs: + tests: + runs-on: ubuntu-20.04 + steps: + - name: Checkout code + uses: actions/checkout@v2 + with: + submodules: recursive + + - name: Setup + run: | + sudo pip install pytest + vim --version + + #- name: Setup tmate session + # uses: mxschmitt/action-tmate@v3 + + - name: Run tests + run: 'make test' + + code-quality: + runs-on: ubuntu-20.04 + steps: + - name: Checkout code + uses: actions/checkout@v2 + with: + submodules: recursive + + - name: Run tests + run: | + vim --version + make check + + coverage: + runs-on: ubuntu-20.04 + 
+ steps: + - name: Checkout code + uses: actions/checkout@v2 + with: + submodules: recursive + + - name: Install dependencies + run: | + sudo add-apt-repository ppa:neovim-ppa/stable -y + sudo apt-get update -q + sudo apt-get install neovim -y + + sudo pip install pynvim pytest-cov + sudo pip list + nvim --version + + - name: Run tests + run: | + make --keep-going test_coverage BUILD_VIRTUAL_ENV=$VIRTUAL_ENV + + - name: Upload coverage data + run: | + coverage xml + bash <(curl -s https://codecov.io/bash) -X fix -f coverage.xml -F py${TRAVIS_PYTHON_VERSION//./} diff --git a/bundle/jedi-vim/.gitignore b/bundle/jedi-vim/.gitignore new file mode 100644 index 000000000..ebe323099 --- /dev/null +++ b/bundle/jedi-vim/.gitignore @@ -0,0 +1,8 @@ +*~ +*.sw? +*.py[cod] +.ropeproject +doc/tags +.pytest-cache +build +.coverage* diff --git a/bundle/jedi-vim/.gitmodules b/bundle/jedi-vim/.gitmodules new file mode 100644 index 000000000..04d8fe610 --- /dev/null +++ b/bundle/jedi-vim/.gitmodules @@ -0,0 +1,6 @@ +[submodule "jedi"] + path = pythonx/jedi + url = https://github.com/davidhalter/jedi.git +[submodule "pythonx/parso"] + path = pythonx/parso + url = https://github.com/davidhalter/parso.git diff --git a/bundle/jedi-vim/.travis.yml b/bundle/jedi-vim/.travis.yml new file mode 100644 index 000000000..7c2db150f --- /dev/null +++ b/bundle/jedi-vim/.travis.yml @@ -0,0 +1,31 @@ +dist: bionic +language: python +python: 3.8 +env: + - ENV=test + - ENV=check + - ENV=test_coverage +install: + - | + if [ "$ENV" = "test" ]; then + pip install pytest + elif [ "$ENV" = "test_coverage" ]; then + sudo add-apt-repository ppa:neovim-ppa/stable -y + sudo apt-get update -q + sudo apt-get install neovim -y + + pip install pynvim pytest-cov + pip list + nvim --version + else + vim --version + fi +script: + - make --keep-going "$ENV" BUILD_VIRTUAL_ENV=$VIRTUAL_ENV + +after_script: + - | + if [ "$ENV" = "test_coverage" ]; then + coverage xml + travis_retry bash <(curl -s https://codecov.io/bash) -X 
fix -f coverage.xml -F py${TRAVIS_PYTHON_VERSION//./} + fi diff --git a/bundle/jedi-vim/AUTHORS.txt b/bundle/jedi-vim/AUTHORS.txt new file mode 100644 index 000000000..0dc43efe9 --- /dev/null +++ b/bundle/jedi-vim/AUTHORS.txt @@ -0,0 +1,61 @@ +Main Authors +============ + +David Halter (@davidhalter) + + +Contributors (in order of contributions) +======================================== + +Patrice Peterson (@runiq) +tek (@tek) +heavenshell (@heavenshell) +Danilo Bargen (@dbrgn) +mattn (@mattn) +Enrico Batista da Luz (@ricobl) +coot (@coot) +Artur Dryomov (@ming13) +andviro (@andviro) +Jean-Louis Fuchs (@ganwell) +Mathieu Comandon (@strycore) +Nick Hurley (@todesschaf) +gpoulin (@gpoulin) +Akinori Hattori (@hattya) +Luper Rouch (@flupke) +Matthew Moses (@mlmoses) +Tyler Wymer (@twymer) +Artem Nezvigin (@artnez) +rogererens (@rogererens) +Emily Strickland (@emilyst) +Tin Tvrtković (@Tinche) +Zekeriya Koc (@zekzekus) +ethinx (@ethinx) +Wouter Overmeire (@lodagro) +Stephen J. Fuhry (@fuhrysteve) +Sheng Yun (@ShengYun) +Yann Thomas-Gérard (@inside) +Colin Su (@littleq0903) +Arthur Jaron (@eyetracker) +Justin M. Keyes (@justinmk) +nagev (@np1) +Chris Lasher (@gotgenes) +Doan Thanh Nam (@tndoan) +Markus Koller (@toupeira) +Justin Cheevers @justincheevers +Talha Ahmed (@talha81) +Matthew Tylee Atkinson (@matatk) +Pedro Ferrari (@petobens) +Daniel Hahler (@blueyed) +Dave Honneffer (@pearofducks) +Bagrat Aznauryan (@n9code) +Tomoyuki Kashiro (@kashiro) +Tommy Allen (@tweekmonster) +Mingliang (@Aulddays) +Brian Mego (@brianmego) +Stevan Milic (@stevanmilic) +Konstantin Glukhov (@Konstantin-Glukhov) +Seungchan An (@SeungChan92) +Thomas Blauth (@ThomasBlauth) +James Cherti (@jamescherti) + +@something are github user names. diff --git a/bundle/jedi-vim/CONTRIBUTING.md b/bundle/jedi-vim/CONTRIBUTING.md new file mode 100644 index 000000000..285eb3081 --- /dev/null +++ b/bundle/jedi-vim/CONTRIBUTING.md @@ -0,0 +1,12 @@ +# We <3 pull requests! + + 1. Fork the Repo on github. + 2. 
Add yourself to AUTHORS.txt + 3. Add a test if possible. + 4. Push to your fork and submit a pull request. + +Please use PEP8 as a Python code style. For VIM, just try to style your +code similar to the jedi-vim code that is already there. + +# Bug reports +Please include the output of `:version` and `:JediDebugInfo`. diff --git a/bundle/jedi-vim/LICENSE.txt b/bundle/jedi-vim/LICENSE.txt new file mode 100644 index 000000000..12f223fc9 --- /dev/null +++ b/bundle/jedi-vim/LICENSE.txt @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) <2013> + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/bundle/jedi-vim/Makefile b/bundle/jedi-vim/Makefile new file mode 100644 index 000000000..02a199179 --- /dev/null +++ b/bundle/jedi-vim/Makefile @@ -0,0 +1,35 @@ +BUILD_VIRTUAL_ENV:=build/venv + +test: + pytest + +test_nvim: + VSPEC_VIM=nvim pytest + +test_coverage: export PYTEST_ADDOPTS:=--cov pythonx --cov test --cov-report=term-missing:skip-covered +test_coverage: test_nvim + +$(dir $(BUILD_VIRTUAL_ENV)): + mkdir -p $@ + +$(BUILD_VIRTUAL_ENV): | $(dir $(BUILD_VIRTUAL_ENV)) + python -m venv $@ + +$(BUILD_VIRTUAL_ENV)/bin/vint: | $(BUILD_VIRTUAL_ENV) + $|/bin/python -m pip install vim-vint==0.3.21 + +$(BUILD_VIRTUAL_ENV)/bin/flake8: | $(BUILD_VIRTUAL_ENV) + $|/bin/python -m pip install -q flake8==3.7.8 + +vint: $(BUILD_VIRTUAL_ENV)/bin/vint + $(BUILD_VIRTUAL_ENV)/bin/vint after autoload ftplugin plugin + +flake8: $(BUILD_VIRTUAL_ENV)/bin/flake8 + $(BUILD_VIRTUAL_ENV)/bin/flake8 pythonx/jedi_*.py + +check: vint flake8 + +clean: + rm -rf build + +.PHONY: test check clean vint flake8 diff --git a/bundle/jedi-vim/README.rst b/bundle/jedi-vim/README.rst new file mode 100644 index 000000000..ac82362c9 --- /dev/null +++ b/bundle/jedi-vim/README.rst @@ -0,0 +1,293 @@ +.. image:: https://github.com/davidhalter/jedi-vim/blob/master/doc/logotype-a.svg + +################################################# +jedi-vim - awesome Python autocompletion with VIM +################################################# + +.. image:: https://travis-ci.org/davidhalter/jedi-vim.svg?branch=master + :target: https://travis-ci.org/davidhalter/jedi-vim + :alt: Travis-CI build status + +jedi-vim is a VIM binding to the autocompletion library +`Jedi `_. + +Here are some pictures: + +.. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_complete.png + +Completion for almost anything (Ctrl+Space). + +.. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_function.png + +Display of function/class bodies, docstrings. + +.. 
image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_pydoc.png + +Documentation (Pydoc) support (with highlighting, Shift+k). + +There is also support for goto and renaming. + + +Get the latest from `github `_. + +Documentation +============= + +Documentation is available in your vim: ``:help jedi-vim``. You can also look +it up `on github `_. + +You can read the Jedi library documentation `here `_. + +If you want to report issues, just use the github issue tracker. In case of +questions about the software, please use `stackoverflow +`_ and tag your question with ``jedi-vim``. + + +Contributing +============ + +We love Pull Requests! Read the instructions in ``CONTRIBUTING.md``. + + +Features +======== + +The Jedi library understands most of Python's core features. From decorators to +generators, there is broad support. + +Apart from that, jedi-vim supports the following commands + +- Completion ```` +- Goto assignment ``g`` (typical goto function) +- Goto definition ``d`` (follow identifier as far as possible, + includes imports and statements) +- Goto (typing) stub ``s`` +- Show Documentation/Pydoc ``K`` (shows a popup with assignments) +- Renaming ``r`` +- Usages ``n`` (shows all the usages of a name) +- Open module, e.g. ``:Pyimport os`` (opens the ``os`` module) + + +Installation +============ + +Requirements +------------ +You need a VIM version that was compiled with Python 2.7 or later +(``+python`` or ``+python3``). You can check this from within VIM using +``:python3 import sys; print(sys.version)`` (use ``:python`` for Python 2). + +Manual installation +------------------- + +You might want to use `pathogen `_ or +`Vundle `_ to install jedi-vim. + +The first thing you need after that is an up-to-date version of Jedi. Install +``git submodule update --init --recursive`` in your jedi-vim repository. + +Example installation command using Pathogen: + +.. 
code-block:: sh + + git clone --recursive https://github.com/davidhalter/jedi-vim.git ~/.vim/bundle/jedi-vim + +Example installation using Vundle: + +Add the following line in your `~/.vimrc` + +.. code-block:: vim + + Plugin 'davidhalter/jedi-vim' + +For installing Jedi, ``pip install jedi`` will also work, but you might run +into issues when working in virtual environments. Please use git submodules. + + +Installation with your distribution +----------------------------------- + +On Arch Linux, you can also install jedi-vim from official repositories as +`vim-jedi `__. +It is also available on +`Debian (≥8) `__ and +`Ubuntu (≥14.04) `__ as +vim-python-jedi. +On Fedora Linux, it is available as +`vim-jedi `__. + +Please note that this version might be quite old compared to using jedi-vim +from Git. + +Caveats +------- + +Note that the `python-mode `_ VIM plugin seems +to conflict with jedi-vim, therefore you should disable it before enabling +jedi-vim. + +To enjoy the full features of jedi-vim, you should have VIM >= 7.3, compiled with +``+conceal`` (which is not the case on some platforms, including OS X). If your VIM +does not meet these requirements, the parameter recommendation list may not appear +when you type an open bracket after a function name. Please read +`the documentation `_ +for details. + + +Settings +======== + +Jedi is by default automatically initialized. If you don't want that I suggest +you disable the auto-initialization in your ``.vimrc``: + +.. code-block:: vim + + let g:jedi#auto_initialization = 0 + +There are also some VIM options (like ``completeopt`` and key defaults) which +are automatically initialized, but you can skip this: + +.. code-block:: vim + + let g:jedi#auto_vim_configuration = 0 + + +You can make jedi-vim use tabs when going to a definition etc: + +.. code-block:: vim + + let g:jedi#use_tabs_not_buffers = 1 + +If you are a person who likes to use VIM-splits, you might want to put this in your ``.vimrc``: + +.. 
code-block:: vim + + let g:jedi#use_splits_not_buffers = "left" + +This options could be "left", "right", "top", "bottom" or "winwidth". It will decide the direction where the split open. + +Jedi automatically starts the completion, if you type a dot, e.g. ``str.``, if +you don't want this: + +.. code-block:: vim + + let g:jedi#popup_on_dot = 0 + +Jedi selects the first line of the completion menu: for a better typing-flow +and usually saves one keypress. + +.. code-block:: vim + + let g:jedi#popup_select_first = 0 + +Jedi displays function call signatures in insert mode in real-time, highlighting +the current argument. The call signatures can be displayed as a pop-up in the +buffer (set to 1 by default (with the conceal feature), 2 otherwise), +which has the advantage of being easier to refer to (but is a hack with +many drawbacks since it changes the buffer's contents), +or in Vim's command line aligned with the function call (set to 2), which +can improve the integrity of Vim's undo history. + +.. code-block:: vim + + let g:jedi#show_call_signatures = "1" + +Here are a few more defaults for actions, read the docs (``:help jedi-vim``) to +get more information. If you set them to ``""``, they are not assigned. + +.. code-block:: vim + + NOTE: subject to change! + + let g:jedi#goto_command = "d" + let g:jedi#goto_assignments_command = "g" + let g:jedi#goto_stubs_command = "s" + let g:jedi#goto_definitions_command = "" + let g:jedi#documentation_command = "K" + let g:jedi#usages_command = "n" + let g:jedi#completions_command = "" + let g:jedi#rename_command = "r" + +An example for setting up your project: + +.. code-block:: vim + + let g:jedi#environment_path = "/usr/bin/python3.9" + +jedi-vim tries its best to guess your virtual env. If you want to work with a +specific virtual environment however, you can point jedi-vim towards it: + +.. 
code-block:: vim + + let g:jedi#environment_path = "venv" + +Finally, if you don't want completion, but all the other features, use: + +.. code-block:: vim + + let g:jedi#completions_enabled = 0 + +FAQ +=== + +I want to use Jedi with a Python 2 Environment, but it's not listed under "Known environments" +---------------------------------------------------------------------------------------------- + +Starting with version 0.18.0 Jedi dropped support for Python 2. + + +I don't want the docstring window to popup during completion +------------------------------------------------------------ + +This depends on the ``completeopt`` option. Jedi initializes it in its +``ftplugin``. Add the following line to your ``.vimrc`` to disable it: + +.. code-block:: vim + + autocmd FileType python setlocal completeopt-=preview + + +I want to do autocompletion +--------------------------------- + +Don't even think about changing the Jedi command to ````, +use `supertab `_! + + +The completion is too slow! +--------------------------- + +1. Completion of complex libraries (like Numpy) should only be slow the first + time you complete them. After that the results should be cached and very fast. + +2. If it is still slow after the initial completion and you have installed the + python-mode Vim plugin, try disabling its rope mode: + + .. code-block:: vim + + let g:pymode_rope = 0 + + See issue `#163 `__. + +3. You can also use `deoplete-jedi `__ + for completions, which uses Jedi, but does completions asynchronously + (requires Neovim). + It makes sense to use both jedi-vim and deoplete-jedi, but you should disable + jedi-vim's completions then: + + .. code-block:: vim + + let g:jedi#completions_enabled = 0 + +Testing +======= + +jedi-vim is being tested with a combination of `vspec +`_ and `py.test `_. + +The tests are in the ``test`` subdirectory, you can run them calling:: + + py.test + +The tests are automatically run with `travis +`_. 
diff --git a/bundle/jedi-vim/after/ftplugin/python/jedi.vim b/bundle/jedi-vim/after/ftplugin/python/jedi.vim new file mode 100644 index 000000000..f19836039 --- /dev/null +++ b/bundle/jedi-vim/after/ftplugin/python/jedi.vim @@ -0,0 +1,3 @@ +if jedi#init_python() && g:jedi#auto_initialization && g:jedi#completions_enabled + call jedi#setup_completion() +endif diff --git a/bundle/jedi-vim/after/syntax/python.vim b/bundle/jedi-vim/after/syntax/python.vim new file mode 100644 index 000000000..08d0d1e7a --- /dev/null +++ b/bundle/jedi-vim/after/syntax/python.vim @@ -0,0 +1,34 @@ +if !jedi#init_python() + finish +endif + +if g:jedi#show_call_signatures > 0 && has('conceal') + " +conceal is the default for vim >= 7.3 + + let s:e = g:jedi#call_signature_escape + let s:full = s:e.'jedi=.\{-}'.s:e.'.\{-}'.s:e.'jedi'.s:e + let s:ignore = s:e.'jedi.\{-}'.s:e + exe 'syn match jediIgnore "'.s:ignore.'" contained conceal' + setlocal conceallevel=2 + syn match jediFatSymbol "\*_\*" contained conceal + syn match jediFat "\*_\*.\{-}\*_\*" contained contains=jediFatSymbol + syn match jediSpace "\v[ ]+( )@=" contained + exe 'syn match jediFunction "'.s:full.'" keepend extend ' + \ .' contains=jediIgnore,jediFat,jediSpace' + \ .' containedin=pythonComment,pythonString,pythonRawString' + unlet! 
s:e s:full s:ignore + + hi def link jediIgnore Ignore + hi def link jediFatSymbol Ignore + hi def link jediSpace Normal + + if exists('g:colors_name') + hi def link jediFunction CursorLine + hi def link jediFat TabLine + else + hi def jediFunction term=NONE cterm=NONE ctermfg=6 guifg=Black gui=NONE ctermbg=0 guibg=Grey + hi def jediFat term=bold,underline cterm=bold,underline gui=bold,underline ctermbg=0 guibg=#555555 + endif +endif + +hi def jediUsage cterm=reverse gui=standout diff --git a/bundle/jedi-vim/autoload/health/jedi.vim b/bundle/jedi-vim/autoload/health/jedi.vim new file mode 100644 index 000000000..8c80e93f3 --- /dev/null +++ b/bundle/jedi-vim/autoload/health/jedi.vim @@ -0,0 +1,4 @@ +function! health#jedi#check() abort + call health#report_start('jedi') + silent call jedi#debug_info() +endfunction diff --git a/bundle/jedi-vim/autoload/jedi.vim b/bundle/jedi-vim/autoload/jedi.vim new file mode 100644 index 000000000..dcef9b6dd --- /dev/null +++ b/bundle/jedi-vim/autoload/jedi.vim @@ -0,0 +1,732 @@ +scriptencoding utf-8 + +" ------------------------------------------------------------------------ +" Settings initialization +" ------------------------------------------------------------------------ +let s:deprecations = { + \ 'get_definition_command': 'goto_definitions_command', + \ 'pydoc': 'documentation_command', + \ 'related_names_command': 'usages_command', + \ 'autocompletion_command': 'completions_command', + \ 'show_function_definition': 'show_call_signatures', +\ } + +let s:default_settings = { + \ 'use_tabs_not_buffers': 0, + \ 'use_splits_not_buffers': 1, + \ 'auto_initialization': 1, + \ 'auto_vim_configuration': 1, + \ 'goto_command': "'d'", + \ 'goto_assignments_command': "'g'", + \ 'goto_definitions_command': "''", + \ 'goto_stubs_command': "'s'", + \ 'completions_command': "''", + \ 'call_signatures_command': "'n'", + \ 'usages_command': "'n'", + \ 'rename_command': "'r'", + \ 'completions_enabled': 1, + \ 'popup_on_dot': 
'g:jedi#completions_enabled', + \ 'documentation_command': "'K'", + \ 'show_call_signatures': has('conceal') ? 1 : 2, + \ 'show_call_signatures_delay': 500, + \ 'call_signature_escape': "'?!?'", + \ 'auto_close_doc': 1, + \ 'max_doc_height': 30, + \ 'popup_select_first': 1, + \ 'quickfix_window_height': 10, + \ 'force_py_version': "'auto'", + \ 'environment_path': "'auto'", + \ 'added_sys_path': '[]', + \ 'project_path': "'auto'", + \ 'smart_auto_mappings': 0, + \ 'case_insensitive_completion': 1, + \ 'use_tag_stack': 1 +\ } + +for [s:key, s:val] in items(s:deprecations) + if exists('g:jedi#'.s:key) + echom "'g:jedi#".s:key."' is deprecated. Please use 'g:jedi#".s:val."' instead. Sorry for the inconvenience." + exe 'let g:jedi#'.s:val.' = g:jedi#'.s:key + endif +endfor + +for [s:key, s:val] in items(s:default_settings) + if !exists('g:jedi#'.s:key) + exe 'let g:jedi#'.s:key.' = '.s:val + endif +endfor + +let s:supports_buffer_usages = has('nvim') || exists('*prop_add') + + +" ------------------------------------------------------------------------ +" Python initialization +" ------------------------------------------------------------------------ +let s:script_path = expand(':p:h:h') + +function! s:init_python() abort + " Use g:jedi#force_py_version for loading Jedi, or fall back to using + " `has()` - preferring Python 3. + if !has('python3') + throw 'jedi-vim requires Vim with support for Python 3.' + endif + call jedi#setup_python_imports() + return 1 +endfunction + + +function! jedi#reinit_python() abort + let s:_init_python = -1 + call jedi#init_python() +endfunction + + +" This is meant to be called with `:unsilent` (for &shortmess+=F). +function! s:display_exception() abort + let error_lines = split(v:exception, '\n') + let msg = 'Error: jedi-vim failed to initialize Python: ' + \ .error_lines[0].' 
(in '.v:throwpoint.')' + if len(error_lines) > 1 + echohl ErrorMsg + echom 'jedi-vim error: '.error_lines[0] + for line in error_lines[1:] + echom line + endfor + echohl None + let help_cmd = ':JediDebugInfo' + if exists(':checkhealth') == 2 + let help_cmd .= ' / :checkhealth' + endif + let msg .= printf('. See :messages and/or %s for more information.', + \ help_cmd) + endif + redraw " Redraw to only have the main message by default. + echoerr msg +endfunction + + +let s:_init_python = -1 +function! jedi#init_python() abort + if s:_init_python == -1 + let s:_init_python = 0 + try + let s:_init_python = s:init_python() + let s:_init_python = 1 + catch /^jedi/ + " Only catch errors from jedi-vim itself here, so that for + " unexpected Python exceptions the traceback will be shown + " (e.g. with NameError in jedi#setup_python_imports's code). + if !exists('g:jedi#squelch_py_warning') + unsilent call s:display_exception() + endif + endtry + endif + return s:_init_python +endfunction + + +function! jedi#setup_python_imports() abort + let g:_jedi_init_error = 0 + let init_lines = [ + \ 'import vim', + \ 'def _jedi_handle_exc(exc_info):', + \ ' try:', + \ ' from jedi_vim_debug import format_exc_info', + \ ' vim.vars["_jedi_init_error"] = format_exc_info(exc_info)', + \ ' except Exception:', + \ ' import traceback', + \ ' vim.vars["_jedi_init_error"] = "\\n".join(traceback.format_exception(*exc_info))', + \ 'try:', + \ ' import jedi_vim', + \ ' if hasattr(jedi_vim, "jedi_import_error"):', + \ ' _jedi_handle_exc(jedi_vim.jedi_import_error)', + \ 'except Exception as exc:', + \ ' _jedi_handle_exc(sys.exc_info())', + \ ] + exe 'python3 exec('''.escape(join(init_lines, '\n'), "'").''')' + if g:_jedi_init_error isnot 0 + throw printf('jedi#setup_python_imports: %s', g:_jedi_init_error) + endif + return 1 +endfunction + + +function! 
jedi#debug_info() abort + if &verbose + if &filetype !=# 'python' + echohl WarningMsg | echo 'You should run this in a buffer with filetype "python".' | echohl None + endif + endif + let spath = shellescape(s:script_path) + echo '#### Jedi-vim debug information' + echo "\n" + echo '##### jedi-vim version' + echo "\n" + echo ' - jedi-vim git version: ' + echon substitute(system('git -C '.spath.' describe --tags --always --dirty'), '\v\n$', '', '') + echo ' - jedi git submodule status: ' + echon substitute(system('git -C '.spath.' submodule status pythonx/jedi'), '\v\n$', '', '') + echo ' - parso git submodule status: ' + echon substitute(system('git -C '.spath.' submodule status pythonx/parso'), '\v\n$', '', '') + echo "\n" + echo '##### Global Python' + echo "\n" + echo 'Using Python version 3 to access Jedi.' + let s:pythonjedi_called = 0 + try + python3 import vim; vim.command('let s:pythonjedi_called = 1') + catch + echo 'Error when trying to import vim: '.v:exception + endtry + if !s:pythonjedi_called + echohl WarningMsg + echom 'python3 failed to run, likely a Python config issue.' + if exists(':checkhealth') == 2 + echom 'Try :checkhealth for more information.' + endif + echohl None + else + try + python3 from jedi_vim_debug import display_debug_info + python3 display_debug_info() + catch + echohl WarningMsg + echo 'Error when running display_debug_info: '.v:exception + echohl None + endtry + endif + echo "\n" + echo '##### Settings' + echo "\n" + echo '```' + let jedi_settings = items(filter(copy(g:), "v:key =~# '\\v^jedi#'")) + let has_nondefault_settings = 0 + for [k, V] in jedi_settings + exe 'let default = '.get(s:default_settings, + \ substitute(k, '\v^jedi#', '', ''), "'-'") + " vint: -ProhibitUsingUndeclaredVariable + if default !=# V + echo printf('g:%s = %s (default: %s)', k, string(V), string(default)) + unlet! V " Fix variable type mismatch with Vim 7.3. 
+ let has_nondefault_settings = 1 + endif + " vint: +ProhibitUsingUndeclaredVariable + endfor + if has_nondefault_settings + echo "\n" + endif + verb set omnifunc? completeopt? + echo '```' + + if &verbose + echo "\n" + echo '#### :version' + echo '```' + version + echo '```' + echo "\n" + echo '#### :messages' + echo '```' + messages + echo '```' + echo "\n" + echo '
:scriptnames' + echo "\n" + echo '```' + scriptnames + echo '```' + echo '
' + endif +endfunction + +" Helper function instead of `python vim.eval()`, and `.command()` because +" these also return error definitions. +function! jedi#_vim_exceptions(str, is_eval) abort + let l:result = {} + try + if a:is_eval + let l:result.result = eval(a:str) + else + execute a:str + let l:result.result = '' + endif + catch + let l:result.exception = v:exception + let l:result.throwpoint = v:throwpoint + endtry + return l:result +endfunction + +call jedi#init_python() " Might throw an error. + +" ------------------------------------------------------------------------ +" functions that call python code +" ------------------------------------------------------------------------ +function! jedi#goto() abort + python3 jedi_vim.goto(mode="goto") +endfunction + +function! jedi#goto_assignments() abort + python3 jedi_vim.goto(mode="assignment") +endfunction + +function! jedi#goto_definitions() abort + python3 jedi_vim.goto(mode="definition") +endfunction + +function! jedi#goto_stubs() abort + python3 jedi_vim.goto(mode="stubs") +endfunction + +function! jedi#usages() abort + if exists('#jedi_usages#BufWinEnter') + call jedi#clear_usages() + endif + python3 jedi_vim.usages() +endfunction + +if !s:supports_buffer_usages +" Hide usages in the current window. +" Only handles the current window due to matchdelete() restrictions. +function! jedi#_hide_usages_in_win() abort + let winnr = winnr() + let matchids = getwinvar(winnr, '_jedi_usages_vim_matchids', []) + + for matchid in matchids[1:] + call matchdelete(matchid) + endfor + call setwinvar(winnr, '_jedi_usages_vim_matchids', []) + + " Remove the autocommands that might have triggered this function. + augroup jedi_usages + exe 'autocmd! * ' + augroup END + unlet! b:_jedi_usages_needs_clear +endfunction + +" Show usages for current window (Vim without textprops only). +function! 
jedi#_show_usages_in_win() abort + python3 jedi_vim.highlight_usages_for_vim_win() + + if !exists('#jedi_usages#TextChanged#') + augroup jedi_usages + " Unset highlights on any changes to this buffer. + " NOTE: Neovim's API handles movement of highlights, but would only + " need to clear highlights that are changed inline. + autocmd TextChanged call jedi#_clear_buffer_usages() + + " Hide usages when the buffer is removed from the window, or when + " entering insert mode (but keep them for later). + autocmd BufWinLeave,InsertEnter call jedi#_hide_usages_in_win() + augroup END + endif +endfunction + +" Remove usages for the current buffer (and all its windows). +function! jedi#_clear_buffer_usages() abort + let bufnr = bufnr('%') + let nvim_src_ids = getbufvar(bufnr, '_jedi_usages_src_ids', []) + if !empty(nvim_src_ids) + for src_id in nvim_src_ids + " TODO: could only clear highlights below/after changed line?! + call nvim_buf_clear_highlight(bufnr, src_id, 0, -1) + endfor + else + call jedi#_hide_usages_in_win() + endif +endfunction +endif + +" Remove/unset global usages. +function! jedi#clear_usages() abort + augroup jedi_usages + autocmd! BufWinEnter + autocmd! WinEnter + augroup END + + if !s:supports_buffer_usages + " Vim without textprops: clear current window, + " autocommands will clean others on demand. + call jedi#_hide_usages_in_win() + + " Setup autocommands to clear remaining highlights on WinEnter. + augroup jedi_usages + for b in range(1, bufnr('$')) + if getbufvar(b, '_jedi_usages_needs_clear') + exe 'autocmd WinEnter call jedi#_hide_usages_in_win()' + endif + endfor + augroup END + endif + + python3 jedi_vim.clear_usages() +endfunction + +function! jedi#rename(...) abort + python3 jedi_vim.rename() +endfunction + +function! jedi#rename_visual(...) abort + python3 jedi_vim.rename_visual() +endfunction + +function! jedi#completions(findstart, base) abort + python3 jedi_vim.completions() +endfunction + +function! 
jedi#enable_speed_debugging() abort + python3 jedi_vim.jedi.set_debug_function(jedi_vim.print_to_stdout, speed=True, warnings=False, notices=False) +endfunction + +function! jedi#enable_debugging() abort + python3 jedi_vim.jedi.set_debug_function(jedi_vim.print_to_stdout) +endfunction + +function! jedi#disable_debugging() abort + python3 jedi_vim.jedi.set_debug_function(None) +endfunction + +function! jedi#py_import(args) abort + python3 jedi_vim.py_import() +endfun + +function! jedi#choose_environment(args) abort + python3 jedi_vim.choose_environment() +endfun + +function! jedi#load_project(args) abort + python3 jedi_vim.load_project() +endfun + +function! jedi#py_import_completions(argl, cmdl, pos) abort + python3 jedi_vim.py_import_completions() +endfun + +function! jedi#clear_cache(bang) abort + if a:bang + python3 jedi_vim.jedi.cache.clear_time_caches(True) + else + python3 jedi_vim.jedi.cache.clear_time_caches(False) + endif +endfunction + + +" ------------------------------------------------------------------------ +" show_documentation +" ------------------------------------------------------------------------ +function! jedi#show_documentation() abort + python3 if jedi_vim.show_documentation() is None: vim.command('return') + + let bn = bufnr('__doc__') + if bn > 0 + let wi=index(tabpagebuflist(tabpagenr()), bn) + if wi >= 0 + " If the __doc__ buffer is open in the current tab, jump to it + silent execute (wi+1).'wincmd w' + else + silent execute 'sbuffer '.bn + endif + else + split __doc__ + endif + + setlocal modifiable + setlocal noswapfile + setlocal buftype=nofile + silent normal! ggdG + silent $put=l:doc + silent normal! 
1Gdd + setlocal nomodifiable + setlocal nomodified + setlocal filetype=rst + setlocal foldlevel=200 " do not fold in __doc__ + + if l:doc_lines > g:jedi#max_doc_height " max lines for plugin + let l:doc_lines = g:jedi#max_doc_height + endif + execute 'resize '.l:doc_lines + + " quit comands + nnoremap q ZQ + if len(g:jedi#documentation_command) + execute 'nnoremap '.g:jedi#documentation_command.' ZQ' + endif +endfunction + +" ------------------------------------------------------------------------ +" helper functions +" ------------------------------------------------------------------------ + +function! jedi#add_goto_window(for_usages, len) abort + let height = min([a:len, g:jedi#quickfix_window_height]) + + " Use :copen to go to the window always - the user should select an entry. + execute 'belowright copen '.height + + if &filetype !=# 'qf' + echoerr printf('jedi-vim: unexpected ft with current window (%s), please report!', &filetype) + endif + if g:jedi#use_tabs_not_buffers == 1 + noremap :call jedi#goto_window_on_enter() + endif + + augroup jedi_goto_window + if a:for_usages + autocmd BufWinLeave call jedi#clear_usages() + else + autocmd WinLeave q " automatically leave, if an option is chosen + endif + augroup END + + if a:for_usages && !has('nvim') + if s:supports_buffer_usages + " Setup autocommand for pending highlights with Vim's textprops. + " (cannot be added to unlisted buffers) + augroup jedi_usages + autocmd! BufWinEnter * call s:usages_for_pending_buffers() + augroup END + else + " Setup global autocommand to display any usages for a window. + " Gets removed when closing the quickfix window that displays them, or + " when clearing them (e.g. on TextChanged). + augroup jedi_usages + autocmd! BufWinEnter,WinEnter * call jedi#_show_usages_in_win() + augroup END + endif + endif +endfunction + +" Highlight usages for a buffer if not done so yet (Neovim only). +function! 
s:usages_for_pending_buffers() abort + python3 jedi_vim._handle_pending_usages_for_buf() +endfunction + + +function! jedi#goto_window_on_enter() abort + let l:list = getqflist() + let l:data = l:list[line('.') - 1] + if l:data.bufnr + " close goto_window buffer + normal! ZQ + python3 jedi_vim.set_buffer(vim.eval('bufname(l:data.bufnr)')) + call cursor(l:data.lnum, l:data.col) + else + echohl WarningMsg | echo 'Builtin module cannot be opened.' | echohl None + endif +endfunction + + +function! s:syn_stack() abort + if !exists('*synstack') + return [] + endif + return map(synstack(line('.'), col('.') - 1), "synIDattr(v:val, 'name')") +endfunc + + +function! jedi#do_popup_on_dot_in_highlight() abort + let highlight_groups = s:syn_stack() + for a in highlight_groups + if a ==# 'pythonDoctest' + return 1 + endif + endfor + + for a in highlight_groups + for b in ['pythonString', 'pythonComment', 'pythonNumber'] + if a == b + return 0 + endif + endfor + endfor + return 1 +endfunc + + +let s:show_call_signatures_last = [0, 0, ''] +function! jedi#show_call_signatures() abort + if s:_init_python == 0 + return 1 + endif + let [line, col] = [line('.'), col('.')] + let curline = getline(line) + let reload_signatures = 1 + + " Caching. On the same line only. + if line == s:show_call_signatures_last[0] + " Check if the number of special signs before or after the + " cursor has not changed since the last call, which means that the + " argument position was not changed and we can skip repainting. 
+ let prevcol = s:show_call_signatures_last[1] + let prevline = s:show_call_signatures_last[2] + let no_special = '[^,()=]' + if substitute(curline[:col-2], no_special, '', 'g') + \ == substitute(prevline[:prevcol-2], no_special, '', 'g') + \ && substitute(curline[(col-2):], no_special, '', 'g') + \ == substitute(prevline[(prevcol-2):], no_special, '', 'g') + let reload_signatures = 0 + endif + endif + let s:show_call_signatures_last = [line, col, curline] + + if reload_signatures + python3 jedi_vim.show_call_signatures() + endif +endfunction + + +function! jedi#clear_call_signatures() abort + if s:_init_python == 0 + return 1 + endif + + let s:show_call_signatures_last = [0, 0, ''] + python3 jedi_vim.clear_call_signatures() +endfunction + + +function! jedi#configure_call_signatures() abort + augroup jedi_call_signatures + autocmd! * + if g:jedi#show_call_signatures == 2 " Command line call signatures + autocmd InsertEnter let g:jedi#first_col = s:save_first_col() + endif + autocmd InsertEnter let s:show_call_signatures_last = [0, 0, ''] + autocmd InsertLeave call jedi#clear_call_signatures() + if g:jedi#show_call_signatures_delay > 0 + autocmd InsertEnter let b:_jedi_orig_updatetime = &updatetime + \ | let &updatetime = g:jedi#show_call_signatures_delay + autocmd InsertLeave if exists('b:_jedi_orig_updatetime') + \ | let &updatetime = b:_jedi_orig_updatetime + \ | unlet b:_jedi_orig_updatetime + \ | endif + autocmd CursorHoldI call jedi#show_call_signatures() + else + autocmd CursorMovedI call jedi#show_call_signatures() + endif + augroup END +endfunction + + +" Determine where the current window is on the screen for displaying call +" signatures in the correct column. +function! s:save_first_col() abort + if bufname('%') ==# '[Command Line]' || winnr('$') == 1 + return 0 + endif + + let startwin = winnr() + let winwidth = winwidth(0) + if winwidth == &columns + return 0 + elseif winnr('$') == 2 + return startwin == 1 ? 
0 : (winwidth(1) + 1) + elseif winnr('$') == 3 + if startwin == 1 + return 0 + endif + let ww1 = winwidth(1) + let ww2 = winwidth(2) + let ww3 = winwidth(3) + if ww1 + ww2 + ww3 + 2 == &columns + if startwin == 2 + return ww1 + 1 + else + return ww1 + ww2 + 2 + endif + elseif startwin == 2 + if ww2 + ww3 + 1 == &columns + return 0 + else + return ww1 + 1 + endif + else " startwin == 3 + if ww2 + ww3 + 1 == &columns + return ww2 + 1 + else + return ww1 + 1 + endif + endif + endif + return 0 +endfunction + + +function! jedi#complete_string(autocomplete) abort + if a:autocomplete + if !(g:jedi#popup_on_dot && jedi#do_popup_on_dot_in_highlight()) + return '' + endif + + let s:saved_completeopt = &completeopt + set completeopt-=longest + set completeopt+=menuone + set completeopt-=menu + if &completeopt !~# 'noinsert\|noselect' + " Patch 775 introduced noinsert and noselect, previously these + " options didn't exist. Setting them in earlier versions results in + " errors (E474). + if has('patch-7.4-775') + if g:jedi#popup_select_first + set completeopt+=noinsert + else + set completeopt+=noselect + endif + else + " To pass the tests we use this, it seems to get the closest to + " the other options. I'm really not sure if this properly + " works, but VIM 7.4-775 is already pretty old, so it might not + " be a problem anymore in a few years. + set completeopt+=longest + endif + endif + elseif pumvisible() + return "\" + endif + return "\\\=jedi#complete_opened(".a:autocomplete.")\" +endfunction + + +function! jedi#complete_opened(autocomplete) abort + if a:autocomplete + let &completeopt = s:saved_completeopt + unlet s:saved_completeopt + elseif pumvisible() && g:jedi#popup_select_first && stridx(&completeopt, 'longest') > -1 + return "\" + endif + return '' +endfunction + + +function! 
jedi#smart_auto_mappings() abort + " Auto put import statement after from module.name and complete + if search('\m^\s*from\s\+[A-Za-z0-9._]\{1,50}\%#\s*$', 'bcn', line('.')) + " Enter character and start completion. + return "\import \=jedi#complete_string(1)\" + endif + return "\" +endfunction + + +function! jedi#setup_completion() abort + " We need our own omnifunc, so this overrides the omnifunc set by + " $VIMRUNTIME/ftplugin/python.vim. + setlocal omnifunc=jedi#completions + + " map ctrl+space for autocompletion + if g:jedi#completions_command ==# '' + " In terminals, sometimes equals . + imap + smap + endif + if len(g:jedi#completions_command) + execute 'inoremap '.g:jedi#completions_command.' jedi#complete_string(0)' + " A separate mapping for select mode: deletes and completes. + execute 'snoremap '.g:jedi#completions_command." '\c'.jedi#complete_string(0)" + endif +endfunction + +"python3 jedi_vim.jedi.set_debug_function(jedi_vim.print_to_stdout, speed=True, warnings=False, notices=False) +"python3 jedi_vim.jedi.set_debug_function(jedi_vim.print_to_stdout) + +" vim: set et ts=4: diff --git a/bundle/jedi-vim/codecov.yml b/bundle/jedi-vim/codecov.yml new file mode 100644 index 000000000..a0a308588 --- /dev/null +++ b/bundle/jedi-vim/codecov.yml @@ -0,0 +1,7 @@ +coverage: + status: + project: true + patch: true + changes: true + +comment: off diff --git a/bundle/jedi-vim/doc/jedi-vim.txt b/bundle/jedi-vim/doc/jedi-vim.txt new file mode 100644 index 000000000..3b94a6d5f --- /dev/null +++ b/bundle/jedi-vim/doc/jedi-vim.txt @@ -0,0 +1,578 @@ +*jedi-vim.txt* - For Vim version 7.3 - Last change: 2014/07/29 + __ _______ _______ __ ____ ____ __ .___ ___.~ + | | | ____|| \ | | \ \ / / | | | \/ |~ + | | | |__ | .--. || | _____\ \/ / | | | \ / |~ +.--. 
| | | __| | | | || | |______\ / | | | |\/| |~ +| `--' | | |____ | '--' || | \ / | | | | | |~ + \______/ |_______||_______/ |__| \__/ |__| |__| |__|~ + + jedi-vim - awesome Python autocompletion with Vim + +============================================================================== +Contents *jedi-vim-contents* + +1. Introduction |jedi-vim-introduction| +2. Installation |jedi-vim-installation| + 2.0. Requirements |jedi-vim-installation-requirements| + 2.1. Manually |jedi-vim-installation-manually| + 2.2. Using Pathogen |jedi-vim-installation-pathogen| + 2.3. Using Vundle |jedi-vim-installation-vundle| + 2.4. Installing from Repositories |jedi-vim-installation-repos| +3. Supported Python features |jedi-vim-support| +4. Usage |jedi-vim-usage| +5. Mappings |jedi-vim-keybindings| + 5.1. Start completion |g:jedi#completions_command| + 5.2. Go to definition |g:jedi#goto_command| + 5.3. Go to assignment |g:jedi#goto_assignments_command| + 5.4 Go to stub |g:jedi#goto_stubs_command| + 5.5. Show documentation |g:jedi#documentation_command| + 5.6. Rename variables |g:jedi#rename_command| + 5.7. Show name usages |g:jedi#usages_command| + 5.8. Open module by name |:Pyimport| +6. Configuration |jedi-vim-configuration| + 6.1. auto_initialization |g:jedi#auto_initialization| + 6.2. auto_vim_configuration |g:jedi#auto_vim_configuration| + 6.3. popup_on_dot |g:jedi#popup_on_dot| + 6.4. popup_select_first |g:jedi#popup_select_first| + 6.5. auto_close_doc |g:jedi#auto_close_doc| + 6.6. show_call_signatures |g:jedi#show_call_signatures| + 6.7. show_call_signatures_delay |g:jedi#show_call_signatures_delay| + 6.8. use_tabs_not_buffers |g:jedi#use_tabs_not_buffers| + 6.9. squelch_py_warning |g:jedi#squelch_py_warning| + 6.10. completions_enabled |g:jedi#completions_enabled| + 6.11. use_splits_not_buffers |g:jedi#use_splits_not_buffers| + 6.12. force_py_version |g:jedi#force_py_version| + 6.13. smart_auto_mappings |g:jedi#smart_auto_mappings| + 6.14. 
use_tag_stack |g:jedi#use_tag_stack| + 6.15. environment_path |g:jedi#environment_path| + |b:jedi_environment_path| + 6.16. added_sys_path |g:jedi#added_sys_path| + |b:jedi_added_sys_path| + 6.17. case_insensitive_completion |g:jedi#case_insensitive_completion| + |b:jedi_case_insensitive_completion| +7. Testing |jedi-vim-testing| +8. Contributing |jedi-vim-contributing| +9. License |jedi-vim-license| + +============================================================================== +1. Introduction *jedi-vim-introduction* + +Jedi-vim is a Vim binding to the awesome Python autocompletion library +`jedi`. Among jedi's (and, therefore, jedi-vim's) features are: + +- Completion for a wide array of Python features (see |jedi-vim-support|) +- Robust in dealing with syntax errors and wrong indentation +- Parses complex module/function/class structures +- Infers function arguments from Sphinx/Epydoc strings +- Doesn't execute Python code +- Supports Virtualenv +- Supports Python 2.7 and 3.4+ + +By leveraging this library, jedi-vim adds the following capabilities to Vim: + +- Displaying function/class bodies +- "Go to definition" command +- Displaying docstrings +- Renaming and refactoring +- Looking up related names + +============================================================================== +2. Installation *jedi-vim-installation* + +------------------------------------------------------------------------------ +2.0. Requirements *jedi-vim-installation-requirements* + +First of all, jedi-vim requires Vim to be compiled with the `+python` option. + +It is best if you have VIM >= 7.3, compiled with the `+conceal` option. With +older versions, you will probably not see the parameter recommendation list +for functions after typing the open bracket. Some platforms (including OS X +releases) do not ship a VIM with `+conceal`. 
You can check if your VIM has the +feature with > + + :ver + +and look for "`+conceal`" (as opposed to "`-conceal`") or > + + :echo has('conceal') + +which will report 0 (not included) or 1 (included). If your VIM lacks this +feature and you would like function parameter completion, you will need to +build your own VIM, or use a package for your operating system that has this +feature (such as MacVim on OS X, which also contains a console binary). + +------------------------------------------------------------------------------ +2.1. Installing manually *jedi-vim-installation-manually* + +1. If you want to install jedi as a submodule instead, issue this command: > + + git clone --recursive http://github.com/davidhalter/jedi-vim + +2. Put the plugin files into their respective folders in your vim runtime + directory (usually ~/.vim). Be sure to pay attention to the directory + structure! +3. Update the Vim help tags with > + + :helptags /doc + +------------------------------------------------------------------------------ +2.2. Installing using Pathogen *jedi-vim-installation-pathogen* + +Pathogen simplifies installation considerably. + +1.a Clone the git repository into your bundles directory: > + + git clone http://github.com/davidhalter/jedi-vim path/to/bundles/jedi-vim + +1b. Again, if you want to install jedi as a submodule, use this command + instead: > + + git clone --recursive http://github.com/davidhalter/jedi-vim + +------------------------------------------------------------------------------ +2.3. Installing using Vundle *jedi-vim-installation-vundle* + +1. Vundle automatically downloads subrepositories as git submodules, so you + will automatically get the jedi library with the jedi-vim plugin. Add the + following to the Bundles section in your .vimrc file: > + + Plugin 'davidhalter/jedi-vim' + +2. Issue the following command in Vim: > + + :PluginInstall + +Help tags are generated automatically, so you should be good to go. 
+ +------------------------------------------------------------------------------ +2.4. Installing from Repositories *jedi-vim-installation-repos* + +Some Linux distributions have jedi-vim packages in their official +repositories. On Arch Linux, install vim-jedi. On Debian (8+) or Ubuntu +(14.04+) install vim-python-jedi. + +============================================================================== +3. Supported Python features *jedi-vim-support* + +The Jedi library does all the hard work behind the scenes. It understands most +Python features, among them: + +- Builtins +- Multiple `return`s or `yield`s +- Tuple assignments/array indexing/dictionary indexing +- `with`-statement/exception handling +- `*args` and `**kwargs` +- Decorators, lambdas, closures +- Generators, iterators +- Some descriptors: `property`/`staticmethod`/`classmethod` +- Some magic methods: `__call__`, `__iter__`, `__next__`, `__get__`, + `__getitem__`, `__init__` +- `list.append()`, `set.add()`, `list.extend()`, etc. +- (Nested) list comprehensions and ternary expressions +- Relative `import`s +- `getattr()`/`__getattr__`/`__getattribute__` +- Function annotations (py3k feature, are being ignored at the moment, but are + parsed) +- Class decorators (py3k feature, are being ignored at the moment, but are + parsed) +- Simple/usual `sys.path` modifications +- `isinstance` checks for `if`/`while`/`assert` case, that doesn't work with + Jedi +- Stubs +- And more... + +Note: This list is not necessarily up to date. For a complete list of +features, please refer to the Jedi documentation at +http://jedi.readthedocs.io. + +============================================================================== +4. Usage *jedi-vim-usage* + +With the default settings, autocompletion can be triggered by typing +. The first entry will automatically be selected, so you can press + to insert it into your code or keep typing and narrow down your +completion options. The usual and / keybindings work as +well. 
Autocompletion is also triggered by typing a period in insert mode. +Since periods rarely occur in Python code outside of method/import lookups, +this is handy to have (but can be disabled). + +When it encounters a new module, jedi might take a few seconds to parse that +module's contents. Afterwards, the contents are cached and completion will be +almost instantaneous. + +============================================================================== +5. Key Bindings *jedi-vim-keybindings* + +All keybindings can be mapped by setting the appropriate global option. For +example, to set the keybinding for starting omnicompletion to instead of +, add the following setting to your .vimrc file: > + + let g:jedi#completions_command = "" + +Note: If you have |g:jedi#auto_initialization| set to 0, you have to create +a mapping yourself by calling a function: > + + " Using for omnicompletion + inoremap + " Use r (by default <\-r>) for renaming + nnoremap r :call jedi#rename() + " etc. + +Note: You can set commands to '', which means that they are empty and not +assigned. It's an easy way to "disable" functionality of jedi-vim. + +------------------------------------------------------------------------------ +5.1. `g:jedi#completions_command` *g:jedi#completions_command* +Function: n/a; see above +Default: Start completion + +Performs autocompletion (or omnicompletion, to be precise). + +Note: If you want to use for completion, please install Supertab: +https://github.com/ervandew/supertab. + +------------------------------------------------------------------------------ +5.2. `g:jedi#goto_command` *g:jedi#goto_command* +Function: `jedi#goto()` +Default: d Go to definition (or assignment) + +This function first tries |jedi#goto_definitions|, and falls back to +|jedi#goto_assignments| for builtin modules. It produces an error if nothing +could be found. +NOTE: this implementation is subject to change. 
+Ref: https://github.com/davidhalter/jedi/issues/570 + +This command tries to find the original definition of the function/class under +the cursor. Just like the `jedi#goto_assignments()` function, it does not work +if the definition isn't in a Python source file. + +The difference between `jedi#goto_assignments()` and `jedi#goto_definitions()` +is that the latter performs recursive lookups. Take, for example, the +following module structure: > + + # file1.py: + from file2 import foo + + # file2.py: + from file3 import bar as foo + + # file3.py + def bar(): + pass + +The `jedi#goto_assignments()` function will take you to the > + + from file2 import foo + +statement in file1.py, while the `jedi#goto_definitions()` function will take +you all the way to the > + + def bar(): + +line in file3.py. + +------------------------------------------------------------------------------ +5.3. `g:jedi#goto_assignments_command` *g:jedi#goto_assignments_command* +Function: `jedi#goto_assignments()` +Default: g Go to assignment + +This function finds the first definition of the function/class under the +cursor. It produces an error if the definition is not in a Python file. + +------------------------------------------------------------------------------ +5.4. `g:jedi#goto_stubs_command` *g:jedi#goto_stubs_command* +Function: `jedi#goto_stubs()` +Default: s Go to stub + +Finds the stub of the function/class under the cursor. + +------------------------------------------------------------------------------ +5.5. `g:jedi#documentation_command` *g:jedi#documentation_command* +Function: `jedi#show_documentation()` +Default: Show pydoc documentation + +This shows the pydoc documentation for the item currently under the cursor. +The documentation is opened in a horizontally split buffer. The height of this +buffer is controlled by `g:jedi#max_doc_height` (set by default to 30). + +------------------------------------------------------------------------------ +5.6. 
`g:jedi#rename_command` *g:jedi#rename_command* +Function: `jedi#rename()` +Default: r Rename variables + +Jedi-vim deletes the word currently under the cursor and puts Vim in insert +mode, where the user is expected to enter the new variable name. Upon leaving +insert mode, jedi-vim then renames all occurrences of the old variable name +with the new one. The number of performed renames is displayed in the command +line. + +------------------------------------------------------------------------------ +5.7. `g:jedi#usages_command` *g:jedi#usages_command* +Function: `jedi#usages()` +Default: n Show usages of a name. + +The quickfix window is populated with a list of all names which point to the +definition of the name under the cursor. + +------------------------------------------------------------------------------ +5.8. Open module by name *:Pyimport* +Function: `jedi#py_import(args)` +Default: :Pyimport e.g. `:Pyimport os` shows os.py in VIM. + +Simulate an import and open that module in VIM. + +============================================================================== +6. Configuration *jedi-vim-configuration* + +Note: You currently have to set these options in your .vimrc. Setting them in +an ftplugin (e.g. ~/.vim/ftplugin/python/jedi-vim-settings.vim) will not work +because jedi-vim is not set up as an filetype plugin, but as a "regular" +plugin. + +------------------------------------------------------------------------------ +6.1. `g:jedi#auto_initialization` *g:jedi#auto_initialization* + +Upon initialization, jedi-vim performs the following steps: + +1. Set the current buffers 'omnifunc' to its own completion function + `jedi#completions` +2. Create mappings to commands specified in |jedi-vim-keybindings| +3. Call `jedi#configure_call_signatures()` if + `g:jedi#show_call_signatures` is set + +You can disable the default initialization routine by setting this option to +0. Beware that you have to perform the above steps yourself, though. 
+ +Options: 0 or 1 +Default: 1 (Perform automatic initialization) + +------------------------------------------------------------------------------ +6.2. `g:jedi#auto_vim_configuration` *g:jedi#auto_vim_configuration* + +Jedi-vim sets 'completeopt' to `menuone,longest,preview` by default, if +'completeopt' is not changed from Vim's default. +It also remaps to in insert mode. + +If you want to keep your own configuration, disable this setting. + +Options: 0 or 1 +Default: 1 (Set 'completeopt' and mapping as described above) + +------------------------------------------------------------------------------ +6.3. `g:jedi#popup_on_dot` *g:jedi#popup_on_dot* + +Jedi-vim automatically starts completion upon typing a period in insert mode. + +However, when working with large modules, this can slow down your typing flow +since you have to wait for jedi to parse the module and show the completion +menu. By disabling this setting, completion is only started when you manually +press the completion key. +You need to also have `g:jedi#completions_enabled` enabled for this. + +Options: 0 or 1 +Default: 1 (Start completion on typing a period) + +------------------------------------------------------------------------------ +6.4. `g:jedi#popup_select_first` *g:jedi#popup_select_first* + +Upon starting completion, jedi-vim can automatically select the first entry +that pops up (without actually inserting it). + +This leads to a better typing flow: As you type more characters, the entries +in the completion menu are narrowed down. If they are narrowed down enough, +you can just press to insert the first match. + +Options: 0 or 1 +Default: 1 (Automatically select first completion entry) + +------------------------------------------------------------------------------ +6.5. `g:jedi#auto_close_doc` *g:jedi#auto_close_doc* + +When doing completion, jedi-vim shows the docstring of the currently selected +item in a preview window. 
By default, this window is being closed after +insertion of a completion item. + +Set this to 0 to leave the preview window open even after leaving insert mode. +This could be useful if you want to browse longer docstrings. + +Options: 0 or 1 +Default: 1 (Automatically close preview window upon leaving insert mode) + +------------------------------------------------------------------------------ +6.6. `g:jedi#show_call_signatures` *g:jedi#show_call_signatures* + +Jedi-vim can display a small window detailing the arguments of the currently +completed function and highlighting the currently selected argument. This can +be disabled by setting this option to 0. Setting this option to 2 shows call +signatures in the command line instead of a popup window. + +Options: 0, 1, or 2 +Default: 1 (Show call signatures window) + +Note: 'showmode' must be disabled for command line call signatures to be +visible. + +Note: This setting is ignored if |g:jedi#auto_initialization| is set to 0. In +that case, if you want to see call signatures, you have to set it up +manually by calling a function in your configuration file: > + + call jedi#configure_call_signatures() + +------------------------------------------------------------------------------ +6.7. `g:jedi#show_call_signatures_delay` *g:jedi#show_call_signatures_delay* + +The delay to be used with |g:jedi#show_call_signatures|. If it is greater +than 0 it will use Vim's |CursorHoldI| event instead of |CursorMovedI|. +It will temporarily set Vim's |'updatetime'| option during insert mode. + +Options: delay in milliseconds +Default: 500 + +------------------------------------------------------------------------------ +6.8. `g:jedi#use_tabs_not_buffers` *g:jedi#use_tabs_not_buffers* + +You can make jedi-vim open a new tab if you use the "go to", "show +definition", or "related names" commands. When you leave this at the default +(0), they open in the current window instead. 
+ +Options: 0 or 1 +Default: 0 (Command output reuses current window) + +------------------------------------------------------------------------------ +6.9. `g:jedi#squelch_py_warning` *g:jedi#squelch_py_warning* + +When Vim has not been compiled with +python, jedi-vim shows a warning to that +effect and aborts loading itself. Set this to 1 to suppress that warning. + +Options: 0 or 1 +Default: 0 (Warning is shown) + +------------------------------------------------------------------------------ +6.10. `g:jedi#completions_enabled` *g:jedi#completions_enabled* + +If you don't want Jedi completion, but all the other features, you can disable +it in favor of another completion engine (that probably also uses Jedi, like +YCM). + +Options: 0 or 1 +Default: 1 + +------------------------------------------------------------------------------ +6.11. `g:jedi#use_splits_not_buffers` *g:jedi#use_splits_not_buffers* + +If you want to open new split for "go to", you could set this option to the +direction which you want to open a split with. + +Options: top, left, right, bottom or winwidth +Default: "" (not enabled by default) + +Note: with the 'winwidth' option the window is split vertically or horizontally +depending on the width of the window relative to 'textwidth'. This essentially +means that if the window is big enough it will be split vertically but if it is +small a horizontal split happens. + +------------------------------------------------------------------------------ +6.12. `g:jedi#force_py_version` *g:jedi#force_py_version* + +If you have installed multiple Python versions, you can force the Python +version that is going to be used. +You don't have to compile VIM with multiple Python versions. +The variable can be set in the .vimrc like this to force python 2: + +let g:jedi#force_py_version = 2 + +By default jedi loads the latest Python version installed on your system that +can be found. + +This variable can be changed during runtime. 
+ +Options: 2, 2.7, 3, 3.5, 3.6, ... +Default: "auto" +------------------------------------------------------------------------------ +6.13. `g:jedi#smart_auto_mappings` *g:jedi#smart_auto_mappings* + +When you start typing `from module.name` jedi-vim automatically +can add the "import" statement and trigger the autocompletion popup. + +You can enable this using: > + + let g:jedi#smart_auto_mappings = 1 +< +Options: 0 or 1 +Default: 0 (disabled by default) + +------------------------------------------------------------------------------ +6.14. `g:jedi#use_tag_stack` *g:jedi#use_tag_stack* + +Write results of |jedi#goto| to a temporary file and use the |:tjump| command +to enable full |tagstack| functionality. Use of the tag stack allows +returning to the usage of a function with CTRL-T after exploring the +definition with arbitrary changes to the |jumplist|. + +Options: 0 or 1 +Default: 1 (enabled by default) + +------------------------------------------------------------------------------ +6.15. `g:jedi#environment_path` *g:jedi#environment_path* + *b:jedi_environment_path* + +To use a specific virtualenv or a specific Python version it is possible to +set an interpreter. + +Both setting the directory and setting a project is working. + +Examples: "/usr/bin/python3.9", "venv", "../venv", "../venv/bin/python" + +The buffer-local variable `b:jedi_environment_path` can be used to override the +global variable `g:jedi#environment_path`. + +Default: "auto" + +------------------------------------------------------------------------------ +6.16. `g:jedi#added_sys_path` *g:jedi#added_sys_path* + *b:jedi_added_sys_path* + +To add extra sys_path. + +The buffer-local variable `b:jedi_added_sys_path` can be used to add +additional extra sys_path. + +Examples: ["../site-packages"] +Default: [] + +------------------------------------------------------------------------------ +6.17. 
`g:jedi#case_insensitive_completion` *g:jedi#case_insensitive_completion* + *b:jedi_case_insensitive_completion* + +0 to disable case insensitive completion. +1 to enable case insensitive completion (default). + +The buffer-local variable `b:jedi_case_insensitive_completion` can be used to +override the global variable `g:jedi#case_insensitive_completion`. + +Default: 1 + +============================================================================== +7. Testing *jedi-vim-testing* + +jedi-vim is being tested with a combination of vspec +https://github.com/kana/vim-vspec and py.test http://pytest.org/. + +The tests are in the test subdirectory, you can run them calling:: + + py.test + +The tests are automatically run with `travis +`_. + +============================================================================== +8. Contributing *jedi-vim-contributing* + +We love Pull Requests! Read the instructions in `CONTRIBUTING.md`. + +============================================================================== +9. License *jedi-vim-license* + +Jedi-vim is licensed with the MIT license. 
+ + vim: textwidth=78 et filetype=help:norightleft: diff --git a/bundle/jedi-vim/doc/logotype-a.svg b/bundle/jedi-vim/doc/logotype-a.svg new file mode 100644 index 000000000..80380bddb --- /dev/null +++ b/bundle/jedi-vim/doc/logotype-a.svg @@ -0,0 +1,140 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/bundle/jedi-vim/ftplugin/python/jedi.vim b/bundle/jedi-vim/ftplugin/python/jedi.vim new file mode 100644 index 000000000..5b97b9c2b --- /dev/null +++ b/bundle/jedi-vim/ftplugin/python/jedi.vim @@ -0,0 +1,53 @@ +if !jedi#init_python() + finish +endif +" ------------------------------------------------------------------------ +" Initialization of jedi-vim +" ------------------------------------------------------------------------ + +if g:jedi#auto_initialization + " goto / get_definition / usages + if len(g:jedi#goto_command) + execute 'nnoremap '.g:jedi#goto_command.' :call jedi#goto()' + endif + if len(g:jedi#goto_assignments_command) + execute 'nnoremap '.g:jedi#goto_assignments_command.' :call jedi#goto_assignments()' + endif + if len(g:jedi#goto_definitions_command) + execute 'nnoremap '.g:jedi#goto_definitions_command.' :call jedi#goto_definitions()' + endif + if len(g:jedi#goto_stubs_command) + execute 'nnoremap '.g:jedi#goto_stubs_command.' :call jedi#goto_stubs()' + endif + if len(g:jedi#usages_command) + execute 'nnoremap '.g:jedi#usages_command.' :call jedi#usages()' + endif + " rename + if len(g:jedi#rename_command) + execute 'nnoremap '.g:jedi#rename_command.' :call jedi#rename()' + execute 'vnoremap '.g:jedi#rename_command.' :call jedi#rename_visual()' + endif + " documentation/pydoc + if len(g:jedi#documentation_command) + execute 'nnoremap '.g:jedi#documentation_command.' 
:call jedi#show_documentation()' + endif + + if g:jedi#show_call_signatures > 0 + call jedi#configure_call_signatures() + endif + + if g:jedi#completions_enabled == 1 + inoremap . .=jedi#complete_string(1) + endif + + if g:jedi#smart_auto_mappings == 1 + inoremap =jedi#smart_auto_mappings() + end + + if g:jedi#auto_close_doc + " close preview if its still open after insert + augroup jedi_preview + autocmd! InsertLeave if pumvisible() == 0|pclose|endif + augroup END + endif +endif diff --git a/bundle/jedi-vim/plugin/jedi.vim b/bundle/jedi-vim/plugin/jedi.vim new file mode 100644 index 000000000..d409240c8 --- /dev/null +++ b/bundle/jedi-vim/plugin/jedi.vim @@ -0,0 +1,77 @@ +"jedi-vim - Omni Completion for python in vim +" Maintainer: David Halter +" +" This part of the software is just the vim interface. The really big deal is +" the Jedi Python library. + +if get(g:, 'jedi#auto_vim_configuration', 1) + " jedi-vim doesn't work in compatible mode (vim script syntax problems) + if &compatible + " vint: -ProhibitSetNoCompatible + set nocompatible + " vint: +ProhibitSetNoCompatible + endif + + " jedi-vim really needs, otherwise jedi-vim cannot start. + filetype plugin on + + augroup jedi_pyi + au! + autocmd BufNewFile,BufRead *.pyi set filetype=python + augroup END + + " Change completeopt, but only if it was not set already. + " This gets done on VimEnter, since otherwise Vim fails to restore the + " screen. Neovim is not affected, this is likely caused by using + " :redir/execute() before the (alternate) terminal is configured. + function! s:setup_completeopt() + if exists('*execute') + let completeopt = execute('silent verb set completeopt?') + else + redir => completeopt + silent verb set completeopt? + redir END + endif + if len(split(completeopt, '\n')) == 1 + set completeopt=menuone,longest,preview + endif + endfunction + if has('nvim') + call s:setup_completeopt() + else + augroup jedi_startup + au! 
+ autocmd VimEnter * call s:setup_completeopt() + augroup END + endif + + if len(mapcheck('', 'i')) == 0 + inoremap + endif +endif + +" Pyimport command +command! -nargs=1 -complete=custom,jedi#py_import_completions Pyimport :call jedi#py_import() + +command! -nargs=? -complete=file JediChooseEnvironment :call jedi#choose_environment() +command! -nargs=? -complete=file JediLoadProject :call jedi#load_project() + + +function! s:jedi_debug_info() + " Ensure the autoload file has been loaded (and ignore any errors, which + " will be displayed with the debug info). + let unset = {} + let saved_squelch_py_warning = get(g:, 'jedi#squelch_py_warning', unset) + let g:jedi#squelch_py_warning = 1 + call jedi#init_python() + if saved_squelch_py_warning is unset + unlet g:jedi#squelch_py_warning + else + let g:jedi#squelch_py_warning = saved_squelch_py_warning + endif + call jedi#debug_info() +endfunction +command! -nargs=0 -bar JediDebugInfo call s:jedi_debug_info() +command! -nargs=0 -bang JediClearCache call jedi#clear_cache(0) + +" vim: set et ts=4: diff --git a/bundle/jedi-vim/pythonx/jedi/.editorconfig b/bundle/jedi-vim/pythonx/jedi/.editorconfig new file mode 100644 index 000000000..5374960ac --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/.editorconfig @@ -0,0 +1,14 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +[*.py] +indent_size = 4 + +[*.md] +indent_size = 2 diff --git a/bundle/jedi-vim/pythonx/jedi/.gitattributes b/bundle/jedi-vim/pythonx/jedi/.gitattributes new file mode 100644 index 000000000..74f0a3d85 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/.gitattributes @@ -0,0 +1,10 @@ +# all end-of-lines are normalized to LF when written to the repository +# https://git-scm.com/docs/gitattributes#_text +* text=auto + +# force all text files on the working dir to have LF line endings +# https://git-scm.com/docs/gitattributes#_eol +* text eol=lf + +# PNGs are not text 
and should not be normalized +*.png -text diff --git a/bundle/jedi-vim/pythonx/jedi/.github/FUNDING.yml b/bundle/jedi-vim/pythonx/jedi/.github/FUNDING.yml new file mode 100644 index 000000000..418e47a80 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/.github/FUNDING.yml @@ -0,0 +1 @@ +github: [davidhalter] diff --git a/bundle/jedi-vim/pythonx/jedi/.github/workflows/ci.yml b/bundle/jedi-vim/pythonx/jedi/.github/workflows/ci.yml new file mode 100644 index 000000000..e306eb50e --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/.github/workflows/ci.yml @@ -0,0 +1,73 @@ +name: ci +on: [push, pull_request] + +jobs: + tests: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-20.04, windows-2019] + python-version: ["3.10", "3.9", "3.8", "3.7", "3.6"] + environment: ['3.8', '3.10', '3.9', '3.7', '3.6', 'interpreter'] + steps: + - name: Checkout code + uses: actions/checkout@v2 + with: + submodules: recursive + + - uses: actions/setup-python@v2 + if: ${{ matrix.environment != 'interpreter' }} + with: + python-version: ${{ matrix.environment }} + + - uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + + - name: Install dependencies + run: 'pip install .[testing]' + + - name: Run tests + run: python -m pytest + env: + JEDI_TEST_ENVIRONMENT: ${{ matrix.environment }} + + code-quality: + runs-on: ubuntu-20.04 + steps: + - name: Checkout code + uses: actions/checkout@v2 + with: + submodules: recursive + + - name: Install dependencies + run: 'pip install .[qa]' + + - name: Run tests + run: | + python -m flake8 jedi setup.py + python -m mypy jedi sith.py + + coverage: + runs-on: ubuntu-20.04 + + steps: + - name: Checkout code + uses: actions/checkout@v2 + with: + submodules: recursive + + - name: Install dependencies + run: 'pip install .[testing] coverage' + + - name: Run tests + run: | + python -m coverage run --source jedi -m pytest + python -m coverage report + + - name: Upload coverage data + run: | + pip install --quiet codecov 
coveralls + python -m coverage xml + python -m coverage report -m + bash <(curl -s https://codecov.io/bash) -X gcov -X coveragepy -X search -X fix -X xcode -f coverage.xml diff --git a/bundle/jedi-vim/pythonx/jedi/.gitignore b/bundle/jedi-vim/pythonx/jedi/.gitignore new file mode 100644 index 000000000..bdb15c947 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/.gitignore @@ -0,0 +1,16 @@ +*~ +*.sw? +*.pyc +.ropeproject +.coveralls.yml +.coverage +.idea +/build/ +/docs/_build/ +/dist/ +jedi.egg-info/ +record.json +/.cache/ +/.pytest_cache +/.mypy_cache +/venv/ diff --git a/bundle/jedi-vim/pythonx/jedi/.gitmodules b/bundle/jedi-vim/pythonx/jedi/.gitmodules new file mode 100644 index 000000000..1a59e543c --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/.gitmodules @@ -0,0 +1,6 @@ +[submodule "jedi/third_party/typeshed"] + path = jedi/third_party/typeshed + url = https://github.com/davidhalter/typeshed.git +[submodule "jedi/third_party/django-stubs"] + path = jedi/third_party/django-stubs + url = https://github.com/davidhalter/django-stubs diff --git a/bundle/jedi-vim/pythonx/jedi/.readthedocs.yml b/bundle/jedi-vim/pythonx/jedi/.readthedocs.yml new file mode 100644 index 000000000..1893a4fdf --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/.readthedocs.yml @@ -0,0 +1,2 @@ +python: + pip_install: true diff --git a/bundle/jedi-vim/pythonx/jedi/AUTHORS.txt b/bundle/jedi-vim/pythonx/jedi/AUTHORS.txt new file mode 100644 index 000000000..b8b10a937 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/AUTHORS.txt @@ -0,0 +1,68 @@ +Main Authors +------------ + +- David Halter (@davidhalter) +- Takafumi Arakaki (@tkf) + +Code Contributors +----------------- + +- Danilo Bargen (@dbrgn) +- Laurens Van Houtven (@lvh) <_@lvh.cc> +- Aldo Stracquadanio (@Astrac) +- Jean-Louis Fuchs (@ganwell) +- tek (@tek) +- Yasha Borevich (@jjay) +- Aaron Griffin +- andviro (@andviro) +- Mike Gilbert (@floppym) +- Aaron Meurer (@asmeurer) +- Lubos Trilety +- Akinori Hattori (@hattya) +- srusskih 
(@srusskih) +- Steven Silvester (@blink1073) +- Colin Duquesnoy (@ColinDuquesnoy) +- Jorgen Schaefer (@jorgenschaefer) +- Fredrik Bergroth (@fbergroth) +- Mathias Fußenegger (@mfussenegger) +- Syohei Yoshida (@syohex) +- ppalucky (@ppalucky) +- immerrr (@immerrr) immerrr@gmail.com +- Albertas Agejevas (@alga) +- Savor d'Isavano (@KenetJervet) +- Phillip Berndt (@phillipberndt) +- Ian Lee (@IanLee1521) +- Farkhad Khatamov (@hatamov) +- Kevin Kelley (@kelleyk) +- Sid Shanker (@squidarth) +- Reinoud Elhorst (@reinhrst) +- Guido van Rossum (@gvanrossum) +- Dmytro Sadovnychyi (@sadovnychyi) +- Cristi Burcă (@scribu) +- bstaint (@bstaint) +- Mathias Rav (@Mortal) +- Daniel Fiterman (@dfit99) +- Simon Ruggier (@sruggier) +- Élie Gouzien (@ElieGouzien) +- Robin Roth (@robinro) +- Malte Plath (@langsamer) +- Anton Zub (@zabulazza) +- Maksim Novikov (@m-novikov) +- Tobias Rzepka (@TobiasRzepka) +- micbou (@micbou) +- Dima Gerasimov (@karlicoss) +- Max Woerner Chase (@mwchase) +- Johannes Maria Frank (@jmfrank63) +- Shane Steinert-Threlkeld (@shanest) +- Tim Gates (@timgates42) +- Lior Goldberg (@goldberglior) +- Ryan Clary (@mrclary) +- Max Mäusezahl (@mmaeusezahl) +- Vladislav Serebrennikov (@endilll) +- Andrii Kolomoiets (@muffinmad) +- Leo Ryu (@Leo-Ryu) +- Joseph Birkner (@josephbirkner) + +And a few more "anonymous" contributors. + +Note: (@user) means a github user name. diff --git a/bundle/jedi-vim/pythonx/jedi/CHANGELOG.rst b/bundle/jedi-vim/pythonx/jedi/CHANGELOG.rst new file mode 100644 index 000000000..b0986667b --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/CHANGELOG.rst @@ -0,0 +1,304 @@ +.. 
:changelog: + +Changelog +--------- + +Unreleased +++++++++++ + +0.18.1 (2021-11-17) ++++++++++++++++++++ + +- Implict namespaces are now a separate types in ``Name().type`` +- Python 3.10 support +- Mostly bugfixes + +0.18.0 (2020-12-25) ++++++++++++++++++++ + +- Dropped Python 2 and Python 3.5 +- Using ``pathlib.Path()`` as an output instead of ``str`` in most places: + - ``Project.path`` + - ``Script.path`` + - ``Definition.module_path`` + - ``Refactoring.get_renames`` + - ``Refactoring.get_changed_files`` +- Functions with ``@property`` now return ``property`` instead of ``function`` + in ``Name().type`` +- Started using annotations +- Better support for the walrus operator +- Project attributes are now read accessible +- Removed all deprecations + +This is likely going to be the last minor release before 1.0. + +0.17.2 (2020-07-17) ++++++++++++++++++++ + +- Added an option to pass environment variables to ``Environment`` +- ``Project(...).path`` exists now +- Support for Python 3.9 +- A few bugfixes + +This will be the last release that supports Python 2 and Python 3.5. +``0.18.0`` will be Python 3.6+. + +0.17.1 (2020-06-20) ++++++++++++++++++++ + +- Django ``Model`` meta class support +- Django Manager support (completion on Managers/QuerySets) +- Added Django Stubs to Jedi, thanks to all contributors of the + `Django Stubs `_ project +- Added ``SyntaxError.get_message`` +- Python 3.9 support +- Bugfixes (mostly towards Generics) + +0.17.0 (2020-04-14) ++++++++++++++++++++ + +- Added ``Project`` support. This allows a user to specify which folders Jedi + should work with. +- Added support for Refactoring. The following refactorings have been + implemented: ``Script.rename``, ``Script.inline``, + ``Script.extract_variable`` and ``Script.extract_function``. +- Added ``Script.get_syntax_errors`` to display syntax errors in the current + script. +- Added code search capabilities both for individual files and projects. 
The + new functions are ``Project.search``, ``Project.complete_search``, + ``Script.search`` and ``Script.complete_search``. +- Added ``Script.help`` to make it easier to display a help window to people. + Now returns pydoc information as well for Python keywords/operators. This + means that on the class keyword it will now return the docstring of Python's + builtin function ``help('class')``. +- The API documentation is now way more readable and complete. Check it out + under https://jedi.readthedocs.io. A lot of it has been rewritten. +- Removed Python 3.4 support +- Many bugfixes + +This is likely going to be the last minor version that supports Python 2 and +Python3.5. Bugfixes will be provided in 0.17.1+. The next minor/major version +will probably be Jedi 1.0.0. + +0.16.0 (2020-01-26) ++++++++++++++++++++ + +- **Added** ``Script.get_context`` to get information where you currently are. +- Completions/type inference of **Pytest fixtures**. +- Tensorflow, Numpy and Pandas completions should now be about **4-10x faster** + after the first time they are used. +- Dict key completions are working now. e.g. ``d = {1000: 3}; d[10`` will + expand to ``1000``. +- Completion for "proxies" works now. These are classes that have a + ``__getattr__(self, name)`` method that does a ``return getattr(x, name)``. + after loading them initially. +- Goto on a function/attribute in a class now goes to the definition in its + super class. 
+- Big **Script API Changes**: + - The line and column parameters of ``jedi.Script`` are now deprecated + - ``completions`` deprecated, use ``complete`` instead + - ``goto_assignments`` deprecated, use ``goto`` instead + - ``goto_definitions`` deprecated, use ``infer`` instead + - ``call_signatures`` deprecated, use ``get_signatures`` instead + - ``usages`` deprecated, use ``get_references`` instead + - ``jedi.names`` deprecated, use ``jedi.Script(...).get_names()`` +- ``BaseName.goto_assignments`` renamed to ``BaseName.goto`` +- Add follow_imports to ``Name.goto``. Now its signature matches + ``Script.goto``. +- **Python 2 support deprecated**. For this release it is best effort. Python 2 + has reached the end of its life and now it's just about a smooth transition. + Bugs for Python 2 will not be fixed anymore and a third of the tests are + already skipped. +- Removed ``settings.no_completion_duplicates``. It wasn't tested and nobody + was probably using it anyway. +- Removed ``settings.use_filesystem_cache`` and + ``settings.additional_dynamic_modules``, they have no usage anymore. Pretty + much nobody was probably using them. + +0.15.2 (2019-12-20) ++++++++++++++++++++ + +- Signatures are now detected a lot better +- Add fuzzy completions with ``Script(...).completions(fuzzy=True)`` +- Files bigger than one MB (about 20kLOC) get cropped to avoid getting + stuck completely. +- Many small Bugfixes +- A big refactoring around contexts/values + +0.15.1 (2019-08-13) ++++++++++++++++++++ + +- Small bugfix and removal of a print statement + +0.15.0 (2019-08-11) ++++++++++++++++++++ + +- Added file path completions, there's a **new** ``Completion.type`` now: + ``path``. Example: ``'/ho`` -> ``'/home/`` +- ``*args``/``**kwargs`` resolving. If possible Jedi replaces the parameters + with the actual alternatives. +- Better support for enums/dataclasses +- When using Interpreter, properties are now executed, since a lot of people + have complained about this. 
Discussion in #1299, #1347. + +New APIs: + +- ``Name.get_signatures() -> List[Signature]``. Signatures are similar to + ``CallSignature``. ``Name.params`` is therefore deprecated. +- ``Signature.to_string()`` to format signatures. +- ``Signature.params -> List[ParamName]``, ParamName has the + following additional attributes ``infer_default()``, ``infer_annotation()``, + ``to_string()``, and ``kind``. +- ``Name.execute() -> List[Name]``, makes it possible to infer + return values of functions. + + +0.14.1 (2019-07-13) ++++++++++++++++++++ + +- CallSignature.index should now be working a lot better +- A couple of smaller bugfixes + +0.14.0 (2019-06-20) ++++++++++++++++++++ + +- Added ``goto_*(prefer_stubs=True)`` as well as ``goto_*(prefer_stubs=True)`` +- Stubs are used now for type inference +- Typeshed is used for better type inference +- Reworked Name.full_name, should have more correct return values + +0.13.3 (2019-02-24) ++++++++++++++++++++ + +- Fixed an issue with embedded Python, see https://github.com/davidhalter/jedi-vim/issues/870 + +0.13.2 (2018-12-15) ++++++++++++++++++++ + +- Fixed a bug that led to Jedi spawning a lot of subprocesses. + +0.13.1 (2018-10-02) ++++++++++++++++++++ + +- Bugfixes, because tensorflow completions were still slow. + +0.13.0 (2018-10-02) ++++++++++++++++++++ + +- A small release. Some bug fixes. +- Remove Python 3.3 support. Python 3.3 support has been dropped by the Python + foundation. +- Default environments are now using the same Python version as the Python + process. In 0.12.x, we used to load the latest Python version on the system. +- Added ``include_builtins`` as a parameter to usages. +- ``goto_assignments`` has a new ``follow_builtin_imports`` parameter that + changes the previous behavior slightly. + +0.12.1 (2018-06-30) ++++++++++++++++++++ + +- This release forces you to upgrade parso. If you don't, nothing will work + anymore. Otherwise changes should be limited to bug fixes. 
Unfortunately Jedi + still uses a few internals of parso that make it hard to keep compatibility + over multiple releases. Parso >=0.3.0 is going to be needed. + +0.12.0 (2018-04-15) ++++++++++++++++++++ + +- Virtualenv/Environment support +- F-String Completion/Goto Support +- Cannot crash with segfaults anymore +- Cleaned up import logic +- Understand async/await and autocomplete it (including async generators) +- Better namespace completions +- Passing tests for Windows (including CI for Windows) +- Remove Python 2.6 support + +0.11.1 (2017-12-14) ++++++++++++++++++++ + +- Parso update - the caching layer was broken +- Better usages - a lot of internal code was ripped out and improved. + +0.11.0 (2017-09-20) ++++++++++++++++++++ + +- Split Jedi's parser into a separate project called ``parso``. +- Avoiding side effects in REPL completion. +- Numpy docstring support should be much better. +- Moved the `settings.*recursion*` away, they are no longer usable. + +0.10.2 (2017-04-05) ++++++++++++++++++++ + +- Python Packaging sucks. Some files were not included in 0.10.1. + +0.10.1 (2017-04-05) ++++++++++++++++++++ + +- Fixed a few very annoying bugs. +- Prepared the parser to be factored out of Jedi. + +0.10.0 (2017-02-03) ++++++++++++++++++++ + +- Actual semantic completions for the complete Python syntax. +- Basic type inference for ``yield from`` PEP 380. +- PEP 484 support (most of the important features of it). Thanks Claude! (@reinhrst) +- Added ``get_line_code`` to ``Name`` and ``Completion`` objects. +- Completely rewritten the type inference engine. +- A new and better parser for (fast) parsing diffs of Python code. + +0.9.0 (2015-04-10) +++++++++++++++++++ + +- The import logic has been rewritten to look more like Python's. There is now + an ``InferState.modules`` import cache, which resembles ``sys.modules``. +- Integrated the parser of 2to3. This will make refactoring possible. 
It will + also be possible to check for error messages (like compiling an AST would give) + in the future. +- With the new parser, the type inference also completely changed. It's now + simpler and more readable. +- Completely rewritten REPL completion. +- Added ``jedi.names``, a command to do static analysis. Thanks to that + sourcegraph guys for sponsoring this! +- Alpha version of the linter. + + +0.8.1 (2014-07-23) ++++++++++++++++++++ + +- Bugfix release, the last release forgot to include files that improve + autocompletion for builtin libraries. Fixed. + +0.8.0 (2014-05-05) ++++++++++++++++++++ + +- Memory Consumption for compiled modules (e.g. builtins, sys) has been reduced + drastically. Loading times are down as well (it takes basically as long as an + import). +- REPL completion is starting to become usable. +- Various small API changes. Generally this release focuses on stability and + refactoring of internal APIs. +- Introducing operator precedence, which makes calculating correct Array + indices and ``__getattr__`` strings possible. + +0.7.0 (2013-08-09) +++++++++++++++++++ + +- Switched from LGPL to MIT license. +- Added an Interpreter class to the API to make autocompletion in REPL + possible. +- Added autocompletion support for namespace packages. +- Add sith.py, a new random testing method. + +0.6.0 (2013-05-14) +++++++++++++++++++ + +- Much faster parser with builtin part caching. +- A test suite, thanks @tkf. + +0.5 versions (2012) ++++++++++++++++++++ + +- Initial development. diff --git a/bundle/jedi-vim/pythonx/jedi/CONTRIBUTING.md b/bundle/jedi-vim/pythonx/jedi/CONTRIBUTING.md new file mode 100644 index 000000000..d791bae93 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/CONTRIBUTING.md @@ -0,0 +1,8 @@ +Pull Requests are great. + + 1. Fork the Repo on github. + 2. If you are adding functionality or fixing a bug, please add a test! + 3. Add your name to AUTHORS.txt + 4. Push to your fork and submit a pull request. 
+ +**Try to use the PEP8 style guide** (and it's ok to have a line length of 100 characters). diff --git a/bundle/jedi-vim/pythonx/jedi/LICENSE.txt b/bundle/jedi-vim/pythonx/jedi/LICENSE.txt new file mode 100644 index 000000000..94f954567 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/LICENSE.txt @@ -0,0 +1,24 @@ +All contributions towards Jedi are MIT licensed. + +------------------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) <2013> + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/bundle/jedi-vim/pythonx/jedi/MANIFEST.in b/bundle/jedi-vim/pythonx/jedi/MANIFEST.in new file mode 100644 index 000000000..75a895c67 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/MANIFEST.in @@ -0,0 +1,15 @@ +include README.rst +include CHANGELOG.rst +include LICENSE.txt +include AUTHORS.txt +include .coveragerc +include sith.py +include conftest.py +include pytest.ini +recursive-include jedi/third_party *.pyi +include jedi/third_party/typeshed/LICENSE +include jedi/third_party/django-stubs/LICENSE.txt +include jedi/third_party/typeshed/README +recursive-include test * +recursive-include docs * +recursive-exclude * *.pyc diff --git a/bundle/jedi-vim/pythonx/jedi/README.rst b/bundle/jedi-vim/pythonx/jedi/README.rst new file mode 100644 index 000000000..7d0d71065 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/README.rst @@ -0,0 +1,216 @@ +#################################################################################### +Jedi - an awesome autocompletion, static analysis and refactoring library for Python +#################################################################################### + +.. image:: http://isitmaintained.com/badge/open/davidhalter/jedi.svg + :target: https://github.com/davidhalter/jedi/issues + :alt: The percentage of open issues and pull requests + +.. image:: http://isitmaintained.com/badge/resolution/davidhalter/jedi.svg + :target: https://github.com/davidhalter/jedi/issues + :alt: The resolution time is the median time an issue or pull request stays open. + +.. image:: https://github.com/davidhalter/jedi/workflows/ci/badge.svg?branch=master + :target: https://github.com/davidhalter/jedi/actions + :alt: Tests + +.. image:: https://pepy.tech/badge/jedi + :target: https://pepy.tech/project/jedi + :alt: PyPI Downloads + + +Jedi is a static analysis tool for Python that is typically used in +IDEs/editors plugins. Jedi has a focus on autocompletion and goto +functionality. 
Other features include refactoring, code search and finding +references. + +Jedi has a simple API to work with. There is a reference implementation as a +`VIM-Plugin `_. Autocompletion in your +REPL is also possible, IPython uses it natively and for the CPython REPL you +can install it. Jedi is well tested and bugs should be rare. + +Jedi can currently be used with the following editors/projects: + +- Vim (jedi-vim_, YouCompleteMe_, deoplete-jedi_, completor.vim_) +- `Visual Studio Code`_ (via `Python Extension `_) +- Emacs (Jedi.el_, company-mode_, elpy_, anaconda-mode_, ycmd_) +- Sublime Text (SublimeJEDI_ [ST2 + ST3], anaconda_ [only ST3]) +- TextMate_ (Not sure if it's actually working) +- Kate_ version 4.13+ supports it natively, you have to enable it, though. [`see + `_] +- Atom_ (autocomplete-python-jedi_) +- `GNOME Builder`_ (with support for GObject Introspection) +- Gedit (gedi_) +- wdb_ - Web Debugger +- `Eric IDE`_ (Available as a plugin) +- `IPython 6.0.0+ `_ +- `xonsh shell `_ has `jedi extension `_ + +and many more! + +There are a few language servers that use Jedi: + +- `jedi-language-server `_ +- `python-language-server `_ +- `anakin-language-server `_ + +Here are some pictures taken from jedi-vim_: + +.. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_complete.png + +Completion for almost anything: + +.. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_function.png + +Documentation: + +.. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_pydoc.png + + +Get the latest version from `github `_ +(master branch should always be kind of stable/working). + +Docs are available at `https://jedi.readthedocs.org/en/latest/ +`_. Pull requests with enhancements +and/or fixes are awesome and most welcome. Jedi uses `semantic versioning +`_. 
+ +If you want to stay **up-to-date** with releases, please **subscribe** to this +mailing list: https://groups.google.com/g/jedi-announce. To subscribe you can +simply send an empty email to ``jedi-announce+subscribe@googlegroups.com``. + +Issues & Questions +================== + +You can file issues and questions in the `issue tracker +`. Alternatively you can also ask on +`Stack Overflow `_ with +the label ``python-jedi``. + +Installation +============ + +`Check out the docs `_. + +Features and Limitations +======================== + +Jedi's features are listed here: +`Features `_. + +You can run Jedi on Python 3.6+ but it should also +understand code that is older than those versions. Additionally you should be +able to use `Virtualenvs `_ +very well. + +Tips on how to use Jedi efficiently can be found `here +`_. + +API +--- + +You can find a comprehensive documentation for the +`API here `_. + +Autocompletion / Goto / Documentation +------------------------------------- + +There are the following commands: + +- ``jedi.Script.goto`` +- ``jedi.Script.infer`` +- ``jedi.Script.help`` +- ``jedi.Script.complete`` +- ``jedi.Script.get_references`` +- ``jedi.Script.get_signatures`` +- ``jedi.Script.get_context`` + +The returned objects are very powerful and are really all you might need. + +Autocompletion in your REPL (IPython, etc.) +------------------------------------------- + +Jedi is a dependency of IPython. Autocompletion in IPython with Jedi is +therefore possible without additional configuration. + +Here is an `example video `_ how REPL completion +can look like. +For the ``python`` shell you can enable tab completion in a `REPL +`_. + +Static Analysis +--------------- + +For a lot of forms of static analysis, you can try to use +``jedi.Script(...).get_names``. It will return a list of names that you can +then filter and work with. There is also a way to list the syntax errors in a +file: ``jedi.Script.get_syntax_errors``. 
+ + +Refactoring +----------- + +Jedi supports the following refactorings: + +- ``jedi.Script.inline`` +- ``jedi.Script.rename`` +- ``jedi.Script.extract_function`` +- ``jedi.Script.extract_variable`` + +Code Search +----------- + +There is support for module search with ``jedi.Script.search``, and project +search for ``jedi.Project.search``. The way to search is either by providing a +name like ``foo`` or by using dotted syntax like ``foo.bar``. Additionally you +can provide the API type like ``class foo.bar.Bar``. There are also the +functions ``jedi.Script.complete_search`` and ``jedi.Project.complete_search``. + +Development +=========== + +There's a pretty good and extensive `development documentation +`_. + +Testing +======= + +The test suite uses ``pytest``:: + + pip install pytest + +If you want to test only a specific Python version (e.g. Python 3.8), it is as +easy as:: + + python3.8 -m pytest + +For more detailed information visit the `testing documentation +`_. + +Acknowledgements +================ + +Thanks a lot to all the +`contributors `_! + + +.. _jedi-vim: https://github.com/davidhalter/jedi-vim +.. _youcompleteme: https://github.com/ycm-core/YouCompleteMe +.. _deoplete-jedi: https://github.com/zchee/deoplete-jedi +.. _completor.vim: https://github.com/maralla/completor.vim +.. _Jedi.el: https://github.com/tkf/emacs-jedi +.. _company-mode: https://github.com/syohex/emacs-company-jedi +.. _elpy: https://github.com/jorgenschaefer/elpy +.. _anaconda-mode: https://github.com/proofit404/anaconda-mode +.. _ycmd: https://github.com/abingham/emacs-ycmd +.. _sublimejedi: https://github.com/srusskih/SublimeJEDI +.. _anaconda: https://github.com/DamnWidget/anaconda +.. _wdb: https://github.com/Kozea/wdb +.. _TextMate: https://github.com/lawrenceakka/python-jedi.tmbundle +.. _Kate: https://kate-editor.org +.. _Atom: https://atom.io/ +.. _autocomplete-python-jedi: https://atom.io/packages/autocomplete-python-jedi +.. 
_GNOME Builder: https://wiki.gnome.org/Apps/Builder +.. _Visual Studio Code: https://code.visualstudio.com/ +.. _gedi: https://github.com/isamert/gedi +.. _Eric IDE: https://eric-ide.python-projects.org diff --git a/bundle/jedi-vim/pythonx/jedi/conftest.py b/bundle/jedi-vim/pythonx/jedi/conftest.py new file mode 100644 index 000000000..c5d88f3bd --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/conftest.py @@ -0,0 +1,172 @@ +import tempfile +import shutil +import os +import sys +from functools import partial + +import pytest + +import jedi +from jedi.api.environment import get_system_environment, InterpreterEnvironment +from test.helpers import test_dir + +collect_ignore = [ + 'setup.py', + 'jedi/__main__.py', + 'jedi/inference/compiled/subprocess/__main__.py', + 'build/', + 'test/examples', + 'sith.py', +] + + +# The following hooks (pytest_configure, pytest_unconfigure) are used +# to modify `jedi.settings.cache_directory` because `clean_jedi_cache` +# has no effect during doctests. Without these hooks, doctests uses +# user's cache (e.g., ~/.cache/jedi/). We should remove this +# workaround once the problem is fixed in pytest. +# +# See: +# - https://github.com/davidhalter/jedi/pull/168 +# - https://bitbucket.org/hpk42/pytest/issue/275/ + +jedi_cache_directory_orig = None +jedi_cache_directory_temp = None + + +def pytest_addoption(parser): + parser.addoption("--jedi-debug", "-D", action='store_true', + help="Enables Jedi's debug output.") + + parser.addoption("--warning-is-error", action='store_true', + help="Warnings are treated as errors.") + + parser.addoption("--env", action='store', + help="Execute the tests in that environment (e.g. 39 for python3.9).") + parser.addoption("--interpreter-env", "-I", action='store_true', + help="Don't use subprocesses to guarantee having safe " + "code execution. 
Useful for debugging.") + + +def pytest_configure(config): + global jedi_cache_directory_orig, jedi_cache_directory_temp + jedi_cache_directory_orig = jedi.settings.cache_directory + jedi_cache_directory_temp = tempfile.mkdtemp(prefix='jedi-test-') + jedi.settings.cache_directory = jedi_cache_directory_temp + + if config.option.jedi_debug: + jedi.set_debug_function() + + if config.option.warning_is_error: + import warnings + warnings.simplefilter("error") + + +def pytest_unconfigure(config): + global jedi_cache_directory_orig, jedi_cache_directory_temp + jedi.settings.cache_directory = jedi_cache_directory_orig + shutil.rmtree(jedi_cache_directory_temp) + + +@pytest.fixture(scope='session') +def clean_jedi_cache(request): + """ + Set `jedi.settings.cache_directory` to a temporary directory during test. + + Note that you can't use built-in `tmpdir` and `monkeypatch` + fixture here because their scope is 'function', which is not used + in 'session' scope fixture. + + This fixture is activated in ../pytest.ini. + """ + from jedi import settings + old = settings.cache_directory + tmp = tempfile.mkdtemp(prefix='jedi-test-') + settings.cache_directory = tmp + + @request.addfinalizer + def restore(): + settings.cache_directory = old + shutil.rmtree(tmp) + + +@pytest.fixture(scope='session') +def environment(request): + version = request.config.option.env + if version is None: + v = str(sys.version_info[0]) + str(sys.version_info[1]) + version = os.environ.get('JEDI_TEST_ENVIRONMENT', v) + + if request.config.option.interpreter_env or version == 'interpreter': + return InterpreterEnvironment() + + if '.' not in version: + version = version[0] + '.' 
+ version[1:] + return get_system_environment(version) + + +@pytest.fixture(scope='session') +def Script(environment): + return partial(jedi.Script, environment=environment) + + +@pytest.fixture(scope='session') +def ScriptWithProject(Script): + project = jedi.Project(test_dir) + return partial(jedi.Script, project=project) + + +@pytest.fixture(scope='session') +def get_names(Script): + return lambda code, **kwargs: Script(code).get_names(**kwargs) + + +@pytest.fixture(scope='session', params=['goto', 'infer']) +def goto_or_infer(request, Script): + return lambda code, *args, **kwargs: getattr(Script(code), request.param)(*args, **kwargs) + + +@pytest.fixture(scope='session', params=['goto', 'help']) +def goto_or_help(request, Script): + return lambda code, *args, **kwargs: getattr(Script(code), request.param)(*args, **kwargs) + + +@pytest.fixture(scope='session', params=['goto', 'help', 'infer']) +def goto_or_help_or_infer(request, Script): + def do(code, *args, **kwargs): + return getattr(Script(code), request.param)(*args, **kwargs) + + do.type = request.param + return do + + +@pytest.fixture(scope='session', params=['goto', 'complete', 'help']) +def goto_or_complete(request, Script): + return lambda code, *args, **kwargs: getattr(Script(code), request.param)(*args, **kwargs) + + +@pytest.fixture(scope='session') +def has_django(environment): + script = jedi.Script('import django', environment=environment) + return bool(script.infer()) + + +@pytest.fixture(scope='session') +def jedi_path(): + return os.path.dirname(__file__) + + +@pytest.fixture() +def skip_pre_python38(environment): + if environment.version_info < (3, 8): + # This if is just needed to avoid that tests ever skip way more than + # they should for all Python versions. + pytest.skip() + + +@pytest.fixture() +def skip_pre_python37(environment): + if environment.version_info < (3, 7): + # This if is just needed to avoid that tests ever skip way more than + # they should for all Python versions. 
+ pytest.skip() diff --git a/bundle/jedi-vim/pythonx/jedi/deploy-master.sh b/bundle/jedi-vim/pythonx/jedi/deploy-master.sh new file mode 100644 index 000000000..e924eaa68 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/deploy-master.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +# The script creates a separate folder in build/ and creates tags there, pushes +# them and then uploads the package to PyPI. + +set -eu -o pipefail + +BASE_DIR=$(dirname $(readlink -f "$0")) +cd $BASE_DIR + +git fetch --tags + +PROJECT_NAME=jedi +BRANCH=master +BUILD_FOLDER=build + +[ -d $BUILD_FOLDER ] || mkdir $BUILD_FOLDER +# Remove the previous deployment first. +# Checkout the right branch +cd $BUILD_FOLDER +rm -rf $PROJECT_NAME +git clone .. $PROJECT_NAME +cd $PROJECT_NAME +git checkout $BRANCH +git submodule update --init + +# Test first. +pytest + +# Create tag +tag=v$(python3 -c "import $PROJECT_NAME; print($PROJECT_NAME.__version__)") + +master_ref=$(git show-ref -s heads/$BRANCH) +tag_ref=$(git show-ref -s $tag || true) +if [[ $tag_ref ]]; then + if [[ $tag_ref != $master_ref ]]; then + echo 'Cannot tag something that has already been tagged with another commit.' + exit 1 + fi +else + git tag -a $tag + git push --tags +fi + +# Package and upload to PyPI +#rm -rf dist/ - Not needed anymore, because the folder is never reused. +echo `pwd` +python3 setup.py sdist bdist_wheel +# Maybe do a pip install twine before. +twine upload dist/* + +cd $BASE_DIR +# The tags have been pushed to this repo. Push the tags to github, now. +git push --tags diff --git a/bundle/jedi-vim/pythonx/jedi/docs/Makefile b/bundle/jedi-vim/pythonx/jedi/docs/Makefile new file mode 100644 index 000000000..14cfdf4bc --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/docs/Makefile @@ -0,0 +1,153 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = _build + +# Internal variables. 
+PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . + +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + -rm -rf $(BUILDDIR)/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. 
The HTML page is in $(BUILDDIR)/singlehtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Jedi.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Jedi.qhc" + +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." + @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/Jedi" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Jedi" + @echo "# devhelp" + +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." 
+ +man: + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." 
diff --git a/bundle/jedi-vim/pythonx/jedi/docs/README.md b/bundle/jedi-vim/pythonx/jedi/docs/README.md new file mode 100644 index 000000000..5eac8a2f4 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/docs/README.md @@ -0,0 +1,14 @@ +Installation +------------ + +Install the graphviz library:: + + sudo apt-get install graphviz + +Install sphinx:: + + sudo pip install sphinx + +You might also need to install the Python graphviz interface:: + + sudo pip install graphviz diff --git a/bundle/jedi-vim/pythonx/jedi/docs/_screenshots/screenshot_complete.png b/bundle/jedi-vim/pythonx/jedi/docs/_screenshots/screenshot_complete.png new file mode 100644 index 000000000..2bafad6a2 Binary files /dev/null and b/bundle/jedi-vim/pythonx/jedi/docs/_screenshots/screenshot_complete.png differ diff --git a/bundle/jedi-vim/pythonx/jedi/docs/_screenshots/screenshot_function.png b/bundle/jedi-vim/pythonx/jedi/docs/_screenshots/screenshot_function.png new file mode 100644 index 000000000..6109703e4 Binary files /dev/null and b/bundle/jedi-vim/pythonx/jedi/docs/_screenshots/screenshot_function.png differ diff --git a/bundle/jedi-vim/pythonx/jedi/docs/_screenshots/screenshot_pydoc.png b/bundle/jedi-vim/pythonx/jedi/docs/_screenshots/screenshot_pydoc.png new file mode 100644 index 000000000..a399d5f73 Binary files /dev/null and b/bundle/jedi-vim/pythonx/jedi/docs/_screenshots/screenshot_pydoc.png differ diff --git a/bundle/jedi-vim/pythonx/jedi/docs/_static/custom_style.css b/bundle/jedi-vim/pythonx/jedi/docs/_static/custom_style.css new file mode 100644 index 000000000..a9e8b8074 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/docs/_static/custom_style.css @@ -0,0 +1,9 @@ +div.version { + color: black !important; + margin-top: -1.2em !important; + margin-bottom: .6em !important; +} + +div.wy-side-nav-search { + padding-top: 0 !important; +} diff --git a/bundle/jedi-vim/pythonx/jedi/docs/_static/logo-src.txt b/bundle/jedi-vim/pythonx/jedi/docs/_static/logo-src.txt new file mode 100644 index 
000000000..59aa4f6cc --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/docs/_static/logo-src.txt @@ -0,0 +1,3 @@ +The source of the logo is a photoshop file hosted here: + +https://dl.dropboxusercontent.com/u/170011615/Jedi12_Logo.psd.xz diff --git a/bundle/jedi-vim/pythonx/jedi/docs/_static/logo.png b/bundle/jedi-vim/pythonx/jedi/docs/_static/logo.png new file mode 100644 index 000000000..9f3d2446f Binary files /dev/null and b/bundle/jedi-vim/pythonx/jedi/docs/_static/logo.png differ diff --git a/bundle/jedi-vim/pythonx/jedi/docs/_templates/ghbuttons.html b/bundle/jedi-vim/pythonx/jedi/docs/_templates/ghbuttons.html new file mode 100644 index 000000000..07292e1f6 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/docs/_templates/ghbuttons.html @@ -0,0 +1,4 @@ +

Github

+ +

diff --git a/bundle/jedi-vim/pythonx/jedi/docs/_templates/sidebarlogo.html b/bundle/jedi-vim/pythonx/jedi/docs/_templates/sidebarlogo.html new file mode 100644 index 000000000..d9243c4eb --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/docs/_templates/sidebarlogo.html @@ -0,0 +1,3 @@ + diff --git a/bundle/jedi-vim/pythonx/jedi/docs/conf.py b/bundle/jedi-vim/pythonx/jedi/docs/conf.py new file mode 100644 index 000000000..057a23609 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/docs/conf.py @@ -0,0 +1,294 @@ +# Jedi documentation build configuration file, created by +# sphinx-quickstart on Wed Dec 26 00:11:34 2012. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys +import os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.abspath('..')) + +# -- General configuration ----------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.todo', + 'sphinx.ext.intersphinx', 'sphinx.ext.inheritance_diagram', + 'sphinx_rtd_theme', 'sphinx.ext.autosummary'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. 
+source_encoding = 'utf-8' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = 'Jedi' +copyright = 'jedi contributors' + +import jedi +from jedi.utils import version_info + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y.Z version. +version = '.'.join(str(x) for x in version_info()[:3]) +# The full version, including alpha/beta/rc tags. +release = jedi.__version__ + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = [] + +# The reST default role (used for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. 
+html_theme = 'sphinx_rtd_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +html_theme_options = { + 'logo_only': True, + 'style_nav_header_background': 'white', +} + +# Add any paths that contain custom themes here, relative to this directory. +html_theme_path = ['_themes'] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +html_logo = '_static/logo.png' + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +html_css_files = ['custom_style.css'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +html_sidebars = { + '**': [ + 'sidebarlogo.html', + 'localtoc.html', + #'relations.html', + 'ghbuttons.html', + #'sourcelink.html', + 'searchbox.html' + ] +} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. 
+#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +html_show_sourcelink = False + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +html_show_sphinx = False + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +html_show_copyright = False + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'Jedidoc' + +#html_style = 'default.css' # Force usage of default template on RTD + + +# -- Options for LaTeX output -------------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + #'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + #'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + #'preamble': '', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). +latex_documents = [ + ('index', 'Jedi.tex', 'Jedi Documentation', + 'Jedi contributors', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. 
+#latex_show_urls = False + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + + +# -- Options for manual page output -------------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ('index', 'jedi', 'Jedi Documentation', + ['Jedi contributors'], 1) +] + +# If true, show URL addresses after external links. +#man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------------ + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ('index', 'Jedi', 'Jedi Documentation', + 'Jedi contributors', 'Jedi', 'Awesome Python autocompletion library.', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +#texinfo_show_urls = 'footnote' + +# -- Options for todo module --------------------------------------------------- + +todo_include_todos = False + +# -- Options for autodoc module ------------------------------------------------ + +autoclass_content = 'both' +autodoc_member_order = 'bysource' +autodoc_default_flags = [] +#autodoc_default_flags = ['members', 'undoc-members'] + + +# -- Options for intersphinx module -------------------------------------------- + +intersphinx_mapping = { + 'python': ('https://docs.python.org/', None), + 'parso': ('https://parso.readthedocs.io/en/latest/', None), +} + + +def skip_deprecated(app, what, name, obj, skip, options): + """ + All attributes containing a deprecated note shouldn't be documented + anymore. This makes it even clearer that they are not supported anymore. 
+ """ + doc = obj.__doc__ + return skip or doc and '.. deprecated::' in doc + + +def setup(app): + app.connect('autodoc-skip-member', skip_deprecated) diff --git a/bundle/jedi-vim/pythonx/jedi/docs/docs/acknowledgements.rst b/bundle/jedi-vim/pythonx/jedi/docs/docs/acknowledgements.rst new file mode 100644 index 000000000..52fa533d5 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/docs/docs/acknowledgements.rst @@ -0,0 +1,66 @@ +.. include global.rst + +History & Acknowledgements +========================== + +Acknowledgements +---------------- + +- Dave Halter for creating and maintaining Jedi & Parso. +- Takafumi Arakaki (@tkf) for creating a solid test environment and a lot of + other things. +- Danilo Bargen (@dbrgn) for general housekeeping and being a good friend :). +- Guido van Rossum (@gvanrossum) for creating the parser generator pgen2 + (originally used in lib2to3). +- Thanks to all the :ref:`contributors `. + +A Little Bit of History +----------------------- + +Written by Dave. + +The Star Wars Jedi are awesome. My Jedi software tries to imitate a little bit +of the precognition the Jedi have. There's even an awesome `scene +`_ of Monty Python Jedis :-). + +But actually the name has not much to do with Star Wars. It's part of my +second name Jedidjah. + +I actually started Jedi back in 2012, because there were no good solutions +available for VIM. Most auto-completion solutions just did not work well. The +only good solution was PyCharm. But I liked my good old VIM very much. There +was also a solution called Rope that did not work at all for me. So I decided +to write my own version of a completion engine. + +The first idea was to execute non-dangerous code. But I soon realized, that +this would not work. So I started to build a static analysis tool. +The biggest problem that I had at the time was that I did not know a thing +about parsers.I did not did not even know the word static analysis. 
It turns +out they are the foundation of a good static analysis tool. I of course did not +know that and tried to write my own poor version of a parser that I ended up +throwing away two years later. + +Because of my lack of knowledge, everything after 2012 and before 2020 was +basically refactoring. I rewrote the core parts of Jedi probably like 5-10 +times. The last big rewrite (that I did twice) was the inclusion of +gradual typing and stubs. + +I learned during that time that it is crucial to have a good understanding of +your problem. Otherwise you just end up doing it again. I only wrote features +in the beginning and in the end. Everything else was bugfixing and refactoring. +However now I am really happy with the result. It works well, bugfixes can be +quick and it is pretty much feature complete. + +-------- + +I will leave you with a small anecdote that happened in 2012, if I remember +correctly. After I explained to Guido van Rossum how some parts of my +auto-completion work, he said: + + *"Oh, that worries me..."* + +Now that it is finished, I hope he likes it :-). + +.. _contributors: + +.. include:: ../../AUTHORS.txt diff --git a/bundle/jedi-vim/pythonx/jedi/docs/docs/api-classes.rst b/bundle/jedi-vim/pythonx/jedi/docs/docs/api-classes.rst new file mode 100644 index 000000000..681a312bf --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/docs/docs/api-classes.rst @@ -0,0 +1,53 @@ +.. include:: ../global.rst + +.. _api-classes: + +API Return Classes +------------------ + +Abstract Base Class +~~~~~~~~~~~~~~~~~~~ +.. autoclass:: jedi.api.classes.BaseName + :members: + :show-inheritance: + +Name +~~~~ +.. autoclass:: jedi.api.classes.Name + :members: + :show-inheritance: + +Completion +~~~~~~~~~~ +.. autoclass:: jedi.api.classes.Completion + :members: + :show-inheritance: + +BaseSignature +~~~~~~~~~~~~~ +.. autoclass:: jedi.api.classes.BaseSignature + :members: + :show-inheritance: + +Signature +~~~~~~~~~ +.. 
autoclass:: jedi.api.classes.Signature + :members: + :show-inheritance: + +ParamName +~~~~~~~~~ +.. autoclass:: jedi.api.classes.ParamName + :members: + :show-inheritance: + +Refactoring +~~~~~~~~~~~ + +.. autoclass:: jedi.api.refactoring.Refactoring + :members: + :show-inheritance: + +.. autoclass:: jedi.api.errors.SyntaxError + :members: + :show-inheritance: diff --git a/bundle/jedi-vim/pythonx/jedi/docs/docs/api.rst b/bundle/jedi-vim/pythonx/jedi/docs/docs/api.rst new file mode 100644 index 000000000..8eac9bd6c --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/docs/docs/api.rst @@ -0,0 +1,173 @@ +.. include:: ../global.rst + +API Overview +============ + +.. note:: This documentation is mostly for Plugin developers, who want to + improve their editors/IDE with Jedi. + +.. _api: + +The API consists of a few different parts: + +- The main starting points for complete/goto: :class:`.Script` and + :class:`.Interpreter`. If you work with Jedi you want to understand these + classes first. +- :ref:`API Result Classes ` +- :ref:`Python Versions/Virtualenv Support ` with functions like + :func:`.find_system_environments` and :func:`.find_virtualenvs` +- A way to work with different :ref:`Folders / Projects ` +- Helpful functions: :func:`.preload_module` and :func:`.set_debug_function` + +The methods that you are most likely going to use to work with Jedi are the +following ones: + +.. currentmodule:: jedi + +.. autosummary:: + :nosignatures: + + Script.complete + Script.goto + Script.infer + Script.help + Script.get_signatures + Script.get_references + Script.get_context + Script.get_names + Script.get_syntax_errors + Script.rename + Script.inline + Script.extract_variable + Script.extract_function + Script.search + Script.complete_search + Project.search + Project.complete_search + +Script +------ + +.. autoclass:: jedi.Script + :members: + +Interpreter +----------- +.. autoclass:: jedi.Interpreter + :members: + +.. _projects: + +Projects +-------- + +.. 
automodule:: jedi.api.project + +.. autofunction:: jedi.get_default_project +.. autoclass:: jedi.Project + :members: + +.. _environments: + +Environments +------------ + +.. automodule:: jedi.api.environment + +.. autofunction:: jedi.find_system_environments +.. autofunction:: jedi.find_virtualenvs +.. autofunction:: jedi.get_system_environment +.. autofunction:: jedi.create_environment +.. autofunction:: jedi.get_default_environment +.. autoexception:: jedi.InvalidPythonEnvironment +.. autoclass:: jedi.api.environment.Environment + :members: + +Helper Functions +---------------- + +.. autofunction:: jedi.preload_module +.. autofunction:: jedi.set_debug_function + +Errors +------ + +.. autoexception:: jedi.InternalError +.. autoexception:: jedi.RefactoringError + +Examples +-------- + +Completions +~~~~~~~~~~~ + +.. sourcecode:: python + + >>> import jedi + >>> code = '''import json; json.l''' + >>> script = jedi.Script(code, path='example.py') + >>> script + > + >>> completions = script.complete(1, 19) + >>> completions + [, ] + >>> completions[1] + + >>> completions[1].complete + 'oads' + >>> completions[1].name + 'loads' + +Type Inference / Goto +~~~~~~~~~~~~~~~~~~~~~ + +.. sourcecode:: python + + >>> import jedi + >>> code = '''\ + ... def my_func(): + ... print 'called' + ... + ... alias = my_func + ... my_list = [1, None, alias] + ... inception = my_list[2] + ... + ... inception()''' + >>> script = jedi.Script(code) + >>> + >>> script.goto(8, 1) + [] + >>> + >>> script.infer(8, 1) + [] + +References +~~~~~~~~~~ + +.. sourcecode:: python + + >>> import jedi + >>> code = '''\ + ... x = 3 + ... if 1 == 2: + ... x = 4 + ... else: + ... del x''' + >>> script = jedi.Script(code) + >>> rns = script.get_references(5, 8) + >>> rns + [, + , + ] + >>> rns[1].line + 3 + >>> rns[1].column + 4 + +Deprecations +------------ + +The deprecation process is as follows: + +1. A deprecation is announced in any release. +2. 
The next major release removes the deprecated functionality. diff --git a/bundle/jedi-vim/pythonx/jedi/docs/docs/changelog.rst b/bundle/jedi-vim/pythonx/jedi/docs/docs/changelog.rst new file mode 100644 index 000000000..09929fe43 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/docs/docs/changelog.rst @@ -0,0 +1 @@ +.. include:: ../../CHANGELOG.rst diff --git a/bundle/jedi-vim/pythonx/jedi/docs/docs/development.rst b/bundle/jedi-vim/pythonx/jedi/docs/docs/development.rst new file mode 100644 index 000000000..6b00aa1e2 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/docs/docs/development.rst @@ -0,0 +1,219 @@ +.. include:: ../global.rst + +Jedi Development +================ + +.. currentmodule:: jedi + +.. note:: This documentation is for Jedi developers who want to improve Jedi + itself, but have no idea how Jedi works. If you want to use Jedi for + your IDE, look at the `plugin api `_. + It is also important to note that it's a pretty old version and some things + might not apply anymore. + + +Introduction +------------ + +This page tries to address the fundamental demand for documentation of the +|jedi| internals. Understanding a dynamic language is a complex task. Especially +because type inference in Python can be a very recursive task. Therefore |jedi| +couldn't get rid of complexity. I know that **simple is better than complex**, +but unfortunately it sometimes requires complex solutions to understand complex +systems. + +In six chapters I'm trying to describe the internals of |jedi|: + +- :ref:`The Jedi Core ` +- :ref:`Core Extensions ` +- :ref:`Imports & Modules ` +- :ref:`Stubs & Annotations ` +- :ref:`Caching & Recursions ` +- :ref:`Helper modules ` + +.. note:: Testing is not documented here, you'll find that + `right here `_. + + +.. 
_core: + +The Jedi Core +------------- + +The core of Jedi consists of three parts: + +- :ref:`Parser ` +- :ref:`Python type inference ` +- :ref:`API ` + +Most people are probably interested in :ref:`type inference `, +because that's where all the magic happens. I need to introduce the :ref:`parser +` first, because :mod:`jedi.inference` uses it extensively. + +.. _parser: + +Parser +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Jedi used to have its internal parser, however this is now a separate project +and is called `parso `_. + +The parser creates a syntax tree that |jedi| analyses and tries to understand. +The grammar that this parser uses is very similar to the official Python +`grammar files `_. + +.. _inference: + +Type inference of python code (inference/__init__.py) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. automodule:: jedi.inference + +Inference Values (inference/base_value.py) +++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. automodule:: jedi.inference.base_value + +.. inheritance-diagram:: + jedi.inference.value.instance.TreeInstance + jedi.inference.value.klass.ClassValue + jedi.inference.value.function.FunctionValue + jedi.inference.value.function.FunctionExecutionContext + :parts: 1 + + +.. _name_resolution: + +Name resolution (inference/finder.py) ++++++++++++++++++++++++++++++++++++++ + +.. automodule:: jedi.inference.finder + + +.. _dev-api: + +API (api/__init__.py and api/classes.py) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The API has been designed to be as easy to use as possible. The API +documentation can be found `here `_. The API itself contains +little code that needs to be mentioned here. Generally I'm trying to be +conservative with the API. I'd rather not add new API features if they are not +necessary, because it's much harder to deprecate stuff than to add it later. + + +.. 
_core-extensions: + +Core Extensions +--------------- + +Core Extensions is a summary of the following topics: + +- :ref:`Iterables & Dynamic Arrays ` +- :ref:`Dynamic Parameters ` +- :ref:`Docstrings ` +- :ref:`Refactoring ` + +These topics are very important to understand what Jedi additionally does, but +they could be removed from Jedi and Jedi would still work. But slower and +without some features. + +.. _iterables: + +Iterables & Dynamic Arrays (inference/value/iterable.py) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To understand Python on a deeper level, |jedi| needs to understand some of the +dynamic features of Python like lists that are filled after creation: + +.. automodule:: jedi.inference.value.iterable + + +.. _dynamic_params: + +Parameter completion (inference/dynamic_params.py) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. automodule:: jedi.inference.dynamic_params + + +.. _docstrings: + +Docstrings (inference/docstrings.py) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. automodule:: jedi.inference.docstrings + +.. _refactoring: + +Refactoring (api/refactoring.py) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. automodule:: jedi.api.refactoring + + +.. _imports-modules: + +Imports & Modules +----------------- + + +- :ref:`Modules ` +- :ref:`Builtin Modules ` +- :ref:`Imports ` + + +.. _builtin: + +Compiled Modules (inference/compiled.py) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. automodule:: jedi.inference.compiled + + +.. _imports: + +Imports (inference/imports.py) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. automodule:: jedi.inference.imports + +.. _stubs: + +Stubs & Annotations (inference/gradual) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. automodule:: jedi.inference.gradual + +.. _caching-recursions: + +Caching & Recursions +-------------------- + + +- :ref:`Caching ` +- :ref:`Recursions ` + +.. _cache: + +Caching (cache.py) +~~~~~~~~~~~~~~~~~~ + +.. automodule:: jedi.cache + +.. 
_recursion: + +Recursions (recursion.py) +~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. automodule:: jedi.inference.recursion + + +.. _dev-helpers: + +Helper Modules +-------------- + +Most other modules are not really central to how Jedi works. They all contain +relevant code, but if you understand the modules above, you pretty much +understand Jedi. diff --git a/bundle/jedi-vim/pythonx/jedi/docs/docs/features.rst b/bundle/jedi-vim/pythonx/jedi/docs/docs/features.rst new file mode 100644 index 000000000..e13c197c8 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/docs/docs/features.rst @@ -0,0 +1,110 @@ +.. include:: ../global.rst + +Features and Limitations +======================== + +Jedi's main API calls and features are: + +- Autocompletion: :meth:`.Script.complete`; It's also possible to get it + working in :ref:`your REPL (IPython, etc.) ` +- Goto/Type Inference: :meth:`.Script.goto` and :meth:`.Script.infer` +- Static Analysis: :meth:`.Script.get_names` and :meth:`.Script.get_syntax_errors` +- Refactorings: :meth:`.Script.rename`, :meth:`.Script.inline`, + :meth:`.Script.extract_variable` and :meth:`.Script.extract_function` +- Code Search: :meth:`.Script.search` and :meth:`.Project.search` + +Basic Features +-------------- + +- Python 3.6+ support +- Ignores syntax errors and wrong indentation +- Can deal with complex module / function / class structures +- Great ``virtualenv``/``venv`` support +- Works great with Python's :ref:`type hinting `, +- Understands stub files +- Can infer function arguments for sphinx, epydoc and basic numpydoc docstrings +- Is overall a very solid piece of software that has been refined for a long
  time. Bug reports are very welcome and are usually fixed within a few weeks. 
+ + +Supported Python Features +------------------------- + +|jedi| supports many of the widely used Python features: + +- builtins +- returns, yields, yield from +- tuple assignments / array indexing / dictionary indexing / star unpacking +- with-statement / exception handling +- ``*args`` / ``**kwargs`` +- decorators / lambdas / closures +- generators / iterators +- descriptors: property / staticmethod / classmethod / custom descriptors +- some magic methods: ``__call__``, ``__iter__``, ``__next__``, ``__get__``, + ``__getitem__``, ``__init__`` +- ``list.append()``, ``set.add()``, ``list.extend()``, etc. +- (nested) list comprehensions / ternary expressions +- relative imports +- ``getattr()`` / ``__getattr__`` / ``__getattribute__`` +- function annotations +- simple/typical ``sys.path`` modifications +- ``isinstance`` checks for if/while/assert +- namespace packages (includes ``pkgutil``, ``pkg_resources`` and PEP420 namespaces) +- Django / Flask / Buildout support +- Understands Pytest fixtures + + +Limitations +----------- + +In general Jedi's limit is quite high, but for very big projects or very +complex code, sometimes Jedi intentionally stops type inference, to avoid +hanging for a long time. + +Additionally there are some Python patterns Jedi does not support. This is +intentional and below should be a complete list: + +- Arbitrary metaclasses: Some metaclasses like enums and dataclasses are + reimplemented in Jedi to make them work. Most of the time stubs are good + enough to get type inference working, even when metaclasses are involved. +- ``setattr()``, ``__import__()`` +- Writing to some dicts: ``globals()``, ``locals()``, ``object.__dict__`` +- Manipulations of instances outside the instance variables without using + methods + +Performance Issues +~~~~~~~~~~~~~~~~~~ + +Importing ``numpy`` can be quite slow sometimes, as well as loading the +builtins the first time. 
If you want to speed things up, you could preload +libraries in |jedi|, with :func:`.preload_module`. However, once loaded, this +should not be a problem anymore. The same is true for huge modules like +``PySide``, ``wx``, ``tensorflow``, ``pandas``, etc. + +Jedi does not have a very good cache layer. This is probably the biggest and +only architectural `issue `_ in +Jedi. Unfortunately it is not easy to change that. Dave Halter is thinking +about rewriting Jedi in Rust, but it has taken Jedi more than 8 years to reach +version 1.0, a rewrite will probably also take years. + +Security +-------- + +For :class:`.Script` +~~~~~~~~~~~~~~~~~~~~ + +Security is an important topic for |jedi|. By default, no code is executed +within Jedi. As long as you write pure Python, everything is inferred +statically. If you enable ``load_unsafe_extensions=True`` for your +:class:`.Project` and you use builtin modules (``c_builtin``) Jedi will execute +those modules. If you don't trust a code base, please do not enable that +option. It might lead to arbitrary code execution. + +For :class:`.Interpreter` +~~~~~~~~~~~~~~~~~~~~~~~~~ + +If you want security for :class:`.Interpreter`, ``do not`` use it. Jedi does +execute properties and in general is not very careful to avoid code execution. +This is intentional: Most people trust the code bases they have imported, +because at that point a malicious code base would have had code execution +already. diff --git a/bundle/jedi-vim/pythonx/jedi/docs/docs/installation.rst b/bundle/jedi-vim/pythonx/jedi/docs/docs/installation.rst new file mode 100644 index 000000000..d8df120c5 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/docs/docs/installation.rst @@ -0,0 +1,88 @@ +.. include:: ../global.rst + +Installation and Configuration +============================== + +.. warning:: Most people will want to install Jedi as a submodule/vendored and + not through pip/system wide. 
The reason for this is that it makes sense that + the plugin that uses Jedi has always access to it. Otherwise Jedi will not + work properly when virtualenvs are activated. So please read the + documentation of your editor/IDE plugin to install Jedi. + + For plugin developers, Jedi works best if it is always available. Vendoring + is a pretty good option for that. + +You can either include |jedi| as a submodule in your text editor plugin (like +jedi-vim_ does by default), or you can install it systemwide. + +.. note:: This just installs the |jedi| library, not the :ref:`editor plugins + `. For information about how to make it work with your + editor, refer to the corresponding documentation. + + +The normal way +-------------- + +Most people use Jedi with a :ref:`editor plugins`. Typically +you install Jedi by installing an editor plugin. No necessary steps are needed. +Just take a look at the instructions for the plugin. + + +With pip +-------- + +On any system you can install |jedi| directly from the Python package index +using pip:: + + sudo pip install jedi + +If you want to install the current development version (master branch):: + + sudo pip install -e git://github.com/davidhalter/jedi.git#egg=jedi + + +System-wide installation via a package manager +---------------------------------------------- + +Arch Linux +~~~~~~~~~~ + +You can install |jedi| directly from official Arch Linux packages: + +- `python-jedi `__ + +(There is also a packaged version of the vim plugin available: +`vim-jedi at Arch Linux `__.) + +Debian +~~~~~~ + +Debian packages are available in the `unstable repository +`__. + +Others +~~~~~~ + +We are in the discussion of adding |jedi| to the Fedora repositories. + + +Manual installation from GitHub +--------------------------------------------- + +If you prefer not to use an automated package installer, you can clone the source from GitHub and install it manually. 
To install it, run these commands:: + + git clone --recurse-submodules https://github.com/davidhalter/jedi + cd jedi + sudo python setup.py install + +Inclusion as a submodule +------------------------ + +If you use an editor plugin like jedi-vim_, you can simply include |jedi| as a +git submodule of the plugin directory. Vim plugin managers like Vundle_ or +Pathogen_ make it very easy to keep submodules up to date. + + +.. _jedi-vim: https://github.com/davidhalter/jedi-vim +.. _vundle: https://github.com/gmarik/vundle +.. _pathogen: https://github.com/tpope/vim-pathogen diff --git a/bundle/jedi-vim/pythonx/jedi/docs/docs/settings.rst b/bundle/jedi-vim/pythonx/jedi/docs/docs/settings.rst new file mode 100644 index 000000000..640a110a7 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/docs/docs/settings.rst @@ -0,0 +1,6 @@ +.. include:: ../global.rst + +Settings +======== + +.. automodule:: jedi.settings diff --git a/bundle/jedi-vim/pythonx/jedi/docs/docs/testing.rst b/bundle/jedi-vim/pythonx/jedi/docs/docs/testing.rst new file mode 100644 index 000000000..223cc2928 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/docs/docs/testing.rst @@ -0,0 +1,36 @@ +.. include:: ../global.rst + +Jedi Testing +============ + +The test suite depends on ``pytest``:: + + pip install pytest + +If you want to test only a specific Python version (e.g. Python 3.8), it is as +easy as:: + + python3.8 -m pytest + +Tests are also run automatically on `GitHub Actions +`_. + +You want to add a test for |jedi|? Great! We love that. Normally you should +write your tests as :ref:`Blackbox Tests `. Most tests would +fit right in there. + +For specific API testing we're using simple unit tests, with a focus on a +simple and readable testing structure. + +.. _blackbox: + +Integration Tests (run.py) +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. automodule:: test.run + +Refactoring Tests (refactor.py) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
automodule:: test.refactor + diff --git a/bundle/jedi-vim/pythonx/jedi/docs/docs/usage.rst b/bundle/jedi-vim/pythonx/jedi/docs/docs/usage.rst new file mode 100644 index 000000000..2ef8b8485 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/docs/docs/usage.rst @@ -0,0 +1,275 @@ +.. include:: ../global.rst + +Using Jedi +========== + +|jedi| is can be used with a variety of :ref:`plugins `, +`language servers ` and other software. +It is also possible to use |jedi| in the :ref:`Python shell or with IPython +`. + +Below you can also find a list of :ref:`recipes for type hinting `. + +.. _language-servers: + +Language Servers +-------------- + +- `jedi-language-server `_ +- `python-language-server `_ +- `anakin-language-server `_ + +.. _editor-plugins: + +Editor Plugins +-------------- + +Vim +~~~ + +- jedi-vim_ +- YouCompleteMe_ +- deoplete-jedi_ + +Visual Studio Code +~~~~~~~~~~~~~~~~~~ + +- `Python Extension`_ + +Emacs +~~~~~ + +- Jedi.el_ +- elpy_ +- anaconda-mode_ + +Sublime Text 2/3 +~~~~~~~~~~~~~~~~ + +- SublimeJEDI_ (ST2 & ST3) +- anaconda_ (only ST3) + +SynWrite +~~~~~~~~ + +- SynJedi_ + +TextMate +~~~~~~~~ + +- Textmate_ (Not sure if it's actually working) + +Kate +~~~~ + +- Kate_ version 4.13+ `supports it natively + `__, + you have to enable it, though. + +Atom +~~~~ + +- autocomplete-python-jedi_ + +GNOME Builder +~~~~~~~~~~~~~ + +- `GNOME Builder`_ `supports it natively + `__, + and is enabled by default. + +Gedit +~~~~~ + +- gedi_ + +Eric IDE +~~~~~~~~ + +- `Eric IDE`_ (Available as a plugin) + +Web Debugger +~~~~~~~~~~~~ + +- wdb_ + +xonsh shell +~~~~~~~~~~~ + +Jedi is a preinstalled extension in `xonsh shell `_. +Run the following command to enable: + +:: + + xontrib load jedi + +and many more! + +.. _repl-completion: + +Tab Completion in the Python Shell +---------------------------------- + +Jedi is a dependency of IPython. Autocompletion in IPython is therefore +possible without additional configuration. 
+ +Here is an `example video `_ how REPL completion +can look like in a different shell. + +There are two different options how you can use Jedi autocompletion in +your ``python`` interpreter. One with your custom ``$HOME/.pythonrc.py`` file +and one that uses ``PYTHONSTARTUP``. + +Using ``PYTHONSTARTUP`` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. automodule:: jedi.api.replstartup + +Using a Custom ``$HOME/.pythonrc.py`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. autofunction:: jedi.utils.setup_readline + +.. _recipes: + +Recipes +------- + +Here are some tips on how to use |jedi| efficiently. + + +.. _type-hinting: + +Type Hinting +~~~~~~~~~~~~ + +If |jedi| cannot detect the type of a function argument correctly (due to the +dynamic nature of Python), you can help it by hinting the type using +one of the docstring/annotation styles below. **Only gradual typing will +always work**, all the docstring solutions are glorified hacks and more +complicated cases will probably not work. + +Official Gradual Typing (Recommended) ++++++++++++++++++++++++++++++++++++++ + +You can read a lot about Python's gradual typing system in the corresponding +PEPs like: + +- `PEP 484 `_ as an introduction +- `PEP 526 `_ for variable annotations +- `PEP 589 `_ for ``TypeDict`` +- There are probably more :) + +Below you can find a few examples how you can use this feature. + +Function annotations:: + + def myfunction(node: ProgramNode, foo: str) -> None: + """Do something with a ``node``. + + """ + node.| # complete here + + +Assignment, for-loop and with-statement type hints:: + + import typing + x: int = foo() + y: typing.Optional[int] = 3 + + key: str + value: Employee + for key, value in foo.items(): + pass + + f: Union[int, float] + with foo() as f: + print(f + 3) + +PEP-0484 should be supported in its entirety. Feel free to open issues if that +is not the case. You can also use stub files. 
+ + +Sphinx style +++++++++++++ + +http://www.sphinx-doc.org/en/stable/domains.html#info-field-lists + +:: + + def myfunction(node, foo): + """ + Do something with a ``node``. + + :type node: ProgramNode + :param str foo: foo parameter description + """ + node.| # complete here + +Epydoc +++++++ + +http://epydoc.sourceforge.net/manual-fields.html + +:: + + def myfunction(node): + """ + Do something with a ``node``. + + @type node: ProgramNode + """ + node.| # complete here + +Numpydoc +++++++++ + +https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt + +In order to support the numpydoc format, you need to install the `numpydoc +`__ package. + +:: + + def foo(var1, var2, long_var_name='hi'): + r""" + A one-line summary that does not use variable names or the + function name. + + ... + + Parameters + ---------- + var1 : array_like + Array_like means all those objects -- lists, nested lists, + etc. -- that can be converted to an array. We can also + refer to variables like `var1`. + var2 : int + The type above can either refer to an actual Python type + (e.g. ``int``), or describe the type of the variable in more + detail, e.g. ``(N,) ndarray`` or ``array_like``. + long_variable_name : {'hi', 'ho'}, optional + Choices in brackets, default first when optional. + + ... + + """ + var2.| # complete here + +.. _jedi-vim: https://github.com/davidhalter/jedi-vim +.. _youcompleteme: https://valloric.github.io/YouCompleteMe/ +.. _deoplete-jedi: https://github.com/zchee/deoplete-jedi +.. _Jedi.el: https://github.com/tkf/emacs-jedi +.. _elpy: https://github.com/jorgenschaefer/elpy +.. _anaconda-mode: https://github.com/proofit404/anaconda-mode +.. _sublimejedi: https://github.com/srusskih/SublimeJEDI +.. _anaconda: https://github.com/DamnWidget/anaconda +.. _SynJedi: http://uvviewsoft.com/synjedi/ +.. _wdb: https://github.com/Kozea/wdb +.. _TextMate: https://github.com/lawrenceakka/python-jedi.tmbundle +.. _kate: https://kate-editor.org/ +.. 
_autocomplete-python-jedi: https://atom.io/packages/autocomplete-python-jedi +.. _GNOME Builder: https://wiki.gnome.org/Apps/Builder/ +.. _gedi: https://github.com/isamert/gedi +.. _Eric IDE: https://eric-ide.python-projects.org +.. _Python Extension: https://marketplace.visualstudio.com/items?itemName=ms-python.python diff --git a/bundle/jedi-vim/pythonx/jedi/docs/global.rst b/bundle/jedi-vim/pythonx/jedi/docs/global.rst new file mode 100644 index 000000000..c0c90b409 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/docs/global.rst @@ -0,0 +1,3 @@ +:orphan: + +.. |jedi| replace:: Jedi diff --git a/bundle/jedi-vim/pythonx/jedi/docs/index.rst b/bundle/jedi-vim/pythonx/jedi/docs/index.rst new file mode 100644 index 000000000..78435fbf3 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/docs/index.rst @@ -0,0 +1,72 @@ +.. include global.rst + +.. meta:: + :github_url: https://github.com/davidhalter/jedi + +Jedi - an awesome autocompletion, static analysis and refactoring library for Python +==================================================================================== + +.. image:: https://img.shields.io/github/stars/davidhalter/jedi.svg?style=social&label=Star&maxAge=2592000 + :target: https://github.com/davidhalter/jedi + :alt: GitHub stars + +.. image:: http://isitmaintained.com/badge/open/davidhalter/jedi.svg + :target: https://github.com/davidhalter/jedi/issues + :alt: The percentage of open issues and pull requests + +.. image:: http://isitmaintained.com/badge/resolution/davidhalter/jedi.svg + :target: https://github.com/davidhalter/jedi/issues + :alt: The resolution time is the median time an issue or pull request stays open. + +.. image:: https://github.com/davidhalter/jedi/workflows/ci/badge.svg?branch=master + :target: https://github.com/davidhalter/jedi/actions + :alt: Tests + +.. image:: https://coveralls.io/repos/davidhalter/jedi/badge.svg?branch=master + :target: https://coveralls.io/r/davidhalter/jedi + :alt: Coverage status + +.. 
image:: https://pepy.tech/badge/jedi + :target: https://pepy.tech/project/jedi + :alt: PyPI Downloads + +`Github Repository `_ + +.. automodule:: jedi + +Autocompletion can for example look like this in jedi-vim: + +.. figure:: _screenshots/screenshot_complete.png + + +.. _toc: + +Docs +---- + +.. toctree:: + :maxdepth: 1 + + docs/usage + docs/features + docs/api + docs/api-classes + docs/installation + docs/settings + docs/development + docs/testing + docs/acknowledgements + docs/changelog + + +.. _resources: + +Resources +--------- + +If you want to stay **up-to-date** with releases, please **subscribe** to this +mailing list: https://groups.google.com/g/jedi-announce. To subscribe you can +simply send an empty email to ``jedi-announce+subscribe@googlegroups.com``. + +- `Source Code on Github `_ +- `Python Package Index `_ diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/__init__.py b/bundle/jedi-vim/pythonx/jedi/jedi/__init__.py new file mode 100644 index 000000000..e5ed6f269 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/__init__.py @@ -0,0 +1,42 @@ +""" +Jedi is a static analysis tool for Python that is typically used in +IDEs/editors plugins. Jedi has a focus on autocompletion and goto +functionality. Other features include refactoring, code search and finding +references. + +Jedi has a simple API to work with. There is a reference implementation as a +`VIM-Plugin `_. Autocompletion in your +REPL is also possible, IPython uses it natively and for the CPython REPL you +can install it. Jedi is well tested and bugs should be rare. + +Here's a simple example of the autocompletion feature: + +>>> import jedi +>>> source = ''' +... import json +... 
json.lo''' +>>> script = jedi.Script(source, path='example.py') +>>> script + +>>> completions = script.complete(3, len('json.lo')) +>>> completions +[, ] +>>> print(completions[0].complete) +ad +>>> print(completions[0].name) +load +""" + +__version__ = '0.18.1' + +from jedi.api import Script, Interpreter, set_debug_function, preload_module +from jedi import settings +from jedi.api.environment import find_virtualenvs, find_system_environments, \ + get_default_environment, InvalidPythonEnvironment, create_environment, \ + get_system_environment, InterpreterEnvironment +from jedi.api.project import Project, get_default_project +from jedi.api.exceptions import InternalError, RefactoringError + +# Finally load the internal plugins. This is only internal. +from jedi.plugins import registry +del registry diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/__main__.py b/bundle/jedi-vim/pythonx/jedi/jedi/__main__.py new file mode 100644 index 000000000..6b4425333 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/__main__.py @@ -0,0 +1,72 @@ +import sys +from os.path import join, dirname, abspath, isdir + + +def _start_linter(): + """ + This is a pre-alpha API. You're not supposed to use it at all, except for + testing. It will very likely change. 
+ """ + import jedi + + if '--debug' in sys.argv: + jedi.set_debug_function() + + for path in sys.argv[2:]: + if path.startswith('--'): + continue + if isdir(path): + import fnmatch + import os + + paths = [] + for root, dirnames, filenames in os.walk(path): + for filename in fnmatch.filter(filenames, '*.py'): + paths.append(os.path.join(root, filename)) + else: + paths = [path] + + try: + for p in paths: + for error in jedi.Script(path=p)._analysis(): + print(error) + except Exception: + if '--pdb' in sys.argv: + import traceback + traceback.print_exc() + import pdb + pdb.post_mortem() + else: + raise + + +def _complete(): + import jedi + import pdb + + if '-d' in sys.argv: + sys.argv.remove('-d') + jedi.set_debug_function() + + try: + completions = jedi.Script(sys.argv[2]).complete() + for c in completions: + c.docstring() + c.type + except Exception as e: + print(repr(e)) + pdb.post_mortem() + else: + print(completions) + + +if len(sys.argv) == 2 and sys.argv[1] == 'repl': + # don't want to use __main__ only for repl yet, maybe we want to use it for + # something else. So just use the keyword ``repl`` for now. + print(join(dirname(abspath(__file__)), 'api', 'replstartup.py')) +elif len(sys.argv) > 1 and sys.argv[1] == '_linter': + _start_linter() +elif len(sys.argv) > 1 and sys.argv[1] == '_complete': + _complete() +else: + print('Command not implemented: %s' % sys.argv[1]) diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/_compatibility.py b/bundle/jedi-vim/pythonx/jedi/jedi/_compatibility.py new file mode 100644 index 000000000..13a74b7b3 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/_compatibility.py @@ -0,0 +1,32 @@ +""" +This module is here to ensure compatibility of Windows/Linux/MacOS and +different Python versions. +""" +import errno +import sys +import pickle + + +def pickle_load(file): + try: + return pickle.load(file) + # Python on Windows don't throw EOF errors for pipes. So reraise them with + # the correct type, which is caught upwards. 
+ except OSError: + if sys.platform == 'win32': + raise EOFError() + raise + + +def pickle_dump(data, file, protocol): + try: + pickle.dump(data, file, protocol) + # On Python 3.3 flush throws sometimes an error even though the writing + # operation should be completed. + file.flush() + # Python on Windows don't throw EPIPE errors for pipes. So reraise them with + # the correct type and error number. + except OSError: + if sys.platform == 'win32': + raise IOError(errno.EPIPE, "Broken pipe") + raise diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/api/__init__.py b/bundle/jedi-vim/pythonx/jedi/jedi/api/__init__.py new file mode 100644 index 000000000..0c4db2dc0 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/api/__init__.py @@ -0,0 +1,777 @@ +""" +The API basically only provides one class. You can create a :class:`Script` and +use its methods. + +Additionally you can add a debug function with :func:`set_debug_function`. +Alternatively, if you don't need a custom function and are happy with printing +debug messages to stdout, simply call :func:`set_debug_function` without +arguments. 
+""" +import sys +from pathlib import Path + +import parso +from parso.python import tree + +from jedi.parser_utils import get_executable_nodes +from jedi import debug +from jedi import settings +from jedi import cache +from jedi.file_io import KnownContentFileIO +from jedi.api import classes +from jedi.api import interpreter +from jedi.api import helpers +from jedi.api.helpers import validate_line_column +from jedi.api.completion import Completion, search_in_module +from jedi.api.keywords import KeywordName +from jedi.api.environment import InterpreterEnvironment +from jedi.api.project import get_default_project, Project +from jedi.api.errors import parso_to_jedi_errors +from jedi.api import refactoring +from jedi.api.refactoring.extract import extract_function, extract_variable +from jedi.inference import InferenceState +from jedi.inference import imports +from jedi.inference.references import find_references +from jedi.inference.arguments import try_iter_content +from jedi.inference.helpers import infer_call_of_leaf +from jedi.inference.sys_path import transform_path_to_dotted +from jedi.inference.syntax_tree import tree_name_to_values +from jedi.inference.value import ModuleValue +from jedi.inference.base_value import ValueSet +from jedi.inference.value.iterable import unpack_tuple_to_dict +from jedi.inference.gradual.conversion import convert_names, convert_values +from jedi.inference.gradual.utils import load_proper_stub_module +from jedi.inference.utils import to_list + +# Jedi uses lots and lots of recursion. By setting this a little bit higher, we +# can remove some "maximum recursion depth" errors. +sys.setrecursionlimit(3000) + + +class Script: + """ + A Script is the base for completions, goto or whatever you want to do with + Jedi. The counter part of this class is :class:`Interpreter`, which works + with actual dictionaries and can work with a REPL. This class + should be used when a user edits code in an editor. 
+ + You can either use the ``code`` parameter or ``path`` to read a file. + Usually you're going to want to use both of them (in an editor). + + The Script's ``sys.path`` is very customizable: + + - If `project` is provided with a ``sys_path``, that is going to be used. + - If `environment` is provided, its ``sys.path`` will be used + (see :func:`Environment.get_sys_path `); + - Otherwise ``sys.path`` will match that of the default environment of + Jedi, which typically matches the sys path that was used at the time + when Jedi was imported. + + Most methods have a ``line`` and a ``column`` parameter. Lines in Jedi are + always 1-based and columns are always zero based. To avoid repetition they + are not always documented. You can omit both line and column. Jedi will + then just do whatever action you are calling at the end of the file. If you + provide only the line, just will complete at the end of that line. + + .. warning:: By default :attr:`jedi.settings.fast_parser` is enabled, which means + that parso reuses modules (i.e. they are not immutable). With this setting + Jedi is **not thread safe** and it is also not safe to use multiple + :class:`.Script` instances and its definitions at the same time. + + If you are a normal plugin developer this should not be an issue. It is + an issue for people that do more complex stuff with Jedi. + + This is purely a performance optimization and works pretty well for all + typical usages, however consider to turn the setting off if it causes + you problems. See also + `this discussion `_. + + :param code: The source code of the current file, separated by newlines. + :type code: str + :param path: The path of the file in the file system, or ``''`` if + it hasn't been saved yet. + :type path: str or pathlib.Path or None + :param Environment environment: Provide a predefined :ref:`Environment ` + to work with a specific Python version or virtualenv. 
+ :param Project project: Provide a :class:`.Project` to make sure finding + references works well, because the right folder is searched. There are + also ways to modify the sys path and other things. + """ + def __init__(self, code=None, *, path=None, environment=None, project=None): + self._orig_path = path + if isinstance(path, str): + path = Path(path) + + self.path = path.absolute() if path else None + + if code is None: + if path is None: + raise ValueError("Must provide at least one of code or path") + + # TODO add a better warning than the traceback! + with open(path, 'rb') as f: + code = f.read() + + if project is None: + # Load the Python grammar of the current interpreter. + project = get_default_project(None if self.path is None else self.path.parent) + + self._inference_state = InferenceState( + project, environment=environment, script_path=self.path + ) + debug.speed('init') + self._module_node, code = self._inference_state.parse_and_get_code( + code=code, + path=self.path, + use_latest_grammar=path and path.suffix == '.pyi', + cache=False, # No disk cache, because the current script often changes. + diff_cache=settings.fast_parser, + cache_path=settings.cache_directory, + ) + debug.speed('parsed') + self._code_lines = parso.split_lines(code, keepends=True) + self._code = code + + cache.clear_time_caches() + debug.reset_time() + + # Cache the module, this is mostly useful for testing, since this shouldn't + # be called multiple times. + @cache.memoize_method + def _get_module(self): + names = None + is_package = False + if self.path is not None: + import_names, is_p = transform_path_to_dotted( + self._inference_state.get_sys_path(add_parent_paths=False), + self.path + ) + if import_names is not None: + names = import_names + is_package = is_p + + if self.path is None: + file_io = None + else: + file_io = KnownContentFileIO(self.path, self._code) + if self.path is not None and self.path.suffix == '.pyi': + # We are in a stub file. 
Try to load the stub properly. + stub_module = load_proper_stub_module( + self._inference_state, + self._inference_state.latest_grammar, + file_io, + names, + self._module_node + ) + if stub_module is not None: + return stub_module + + if names is None: + names = ('__main__',) + + module = ModuleValue( + self._inference_state, self._module_node, + file_io=file_io, + string_names=names, + code_lines=self._code_lines, + is_package=is_package, + ) + if names[0] not in ('builtins', 'typing'): + # These modules are essential for Jedi, so don't overwrite them. + self._inference_state.module_cache.add(names, ValueSet([module])) + return module + + def _get_module_context(self): + return self._get_module().as_context() + + def __repr__(self): + return '<%s: %s %r>' % ( + self.__class__.__name__, + repr(self._orig_path), + self._inference_state.environment, + ) + + @validate_line_column + def complete(self, line=None, column=None, *, fuzzy=False): + """ + Completes objects under the cursor. + + Those objects contain information about the completions, more than just + names. + + :param fuzzy: Default False. Will return fuzzy completions, which means + that e.g. ``ooa`` will match ``foobar``. + :return: Completion objects, sorted by name. Normal names appear + before "private" names that start with ``_`` and those appear + before magic methods and name mangled names that start with ``__``. + :rtype: list of :class:`.Completion` + """ + with debug.increase_indent_cm('complete'): + completion = Completion( + self._inference_state, self._get_module_context(), self._code_lines, + (line, column), self.get_signatures, fuzzy=fuzzy, + ) + return completion.complete() + + @validate_line_column + def infer(self, line=None, column=None, *, only_stubs=False, prefer_stubs=False): + """ + Return the definitions of under the cursor. It is basically a wrapper + around Jedi's type inference. + + This method follows complicated paths and returns the end, not the + first definition. 
The big difference between :meth:`goto` and + :meth:`infer` is that :meth:`goto` doesn't + follow imports and statements. Multiple objects may be returned, + because depending on an option you can have two different versions of a + function. + + :param only_stubs: Only return stubs for this method. + :param prefer_stubs: Prefer stubs to Python objects for this method. + :rtype: list of :class:`.Name` + """ + pos = line, column + leaf = self._module_node.get_name_of_position(pos) + if leaf is None: + leaf = self._module_node.get_leaf_for_position(pos) + if leaf is None or leaf.type == 'string': + return [] + if leaf.end_pos == (line, column) and leaf.type == 'operator': + next_ = leaf.get_next_leaf() + if next_.start_pos == leaf.end_pos \ + and next_.type in ('number', 'string', 'keyword'): + leaf = next_ + + context = self._get_module_context().create_context(leaf) + + values = helpers.infer(self._inference_state, context, leaf) + values = convert_values( + values, + only_stubs=only_stubs, + prefer_stubs=prefer_stubs, + ) + + defs = [classes.Name(self._inference_state, c.name) for c in values] + # The additional set here allows the definitions to become unique in an + # API sense. In the internals we want to separate more things than in + # the API. + return helpers.sorted_definitions(set(defs)) + + @validate_line_column + def goto(self, line=None, column=None, *, follow_imports=False, follow_builtin_imports=False, + only_stubs=False, prefer_stubs=False): + """ + Goes to the name that defined the object under the cursor. Optionally + you can follow imports. + Multiple objects may be returned, depending on an if you can have two + different versions of a function. + + :param follow_imports: The method will follow imports. + :param follow_builtin_imports: If ``follow_imports`` is True will try + to look up names in builtins (i.e. compiled or extension modules). + :param only_stubs: Only return stubs for this method. 
+ :param prefer_stubs: Prefer stubs to Python objects for this method. + :rtype: list of :class:`.Name` + """ + tree_name = self._module_node.get_name_of_position((line, column)) + if tree_name is None: + # Without a name we really just want to jump to the result e.g. + # executed by `foo()`, if we the cursor is after `)`. + return self.infer(line, column, only_stubs=only_stubs, prefer_stubs=prefer_stubs) + name = self._get_module_context().create_name(tree_name) + + # Make it possible to goto the super class function/attribute + # definitions, when they are overwritten. + names = [] + if name.tree_name.is_definition() and name.parent_context.is_class(): + class_node = name.parent_context.tree_node + class_value = self._get_module_context().create_value(class_node) + mro = class_value.py__mro__() + next(mro) # Ignore the first entry, because it's the class itself. + for cls in mro: + names = cls.goto(tree_name.value) + if names: + break + + if not names: + names = list(name.goto()) + + if follow_imports: + names = helpers.filter_follow_imports(names, follow_builtin_imports) + names = convert_names( + names, + only_stubs=only_stubs, + prefer_stubs=prefer_stubs, + ) + + defs = [classes.Name(self._inference_state, d) for d in set(names)] + # Avoid duplicates + return list(set(helpers.sorted_definitions(defs))) + + def search(self, string, *, all_scopes=False): + """ + Searches a name in the current file. For a description of how the + search string should look like, please have a look at + :meth:`.Project.search`. + + :param bool all_scopes: Default False; searches not only for + definitions on the top level of a module level, but also in + functions and classes. 
+ :yields: :class:`.Name` + """ + return self._search_func(string, all_scopes=all_scopes) + + @to_list + def _search_func(self, string, all_scopes=False, complete=False, fuzzy=False): + names = self._names(all_scopes=all_scopes) + wanted_type, wanted_names = helpers.split_search_string(string) + return search_in_module( + self._inference_state, + self._get_module_context(), + names=names, + wanted_type=wanted_type, + wanted_names=wanted_names, + complete=complete, + fuzzy=fuzzy, + ) + + def complete_search(self, string, **kwargs): + """ + Like :meth:`.Script.search`, but completes that string. If you want to + have all possible definitions in a file you can also provide an empty + string. + + :param bool all_scopes: Default False; searches not only for + definitions on the top level of a module level, but also in + functions and classes. + :param fuzzy: Default False. Will return fuzzy completions, which means + that e.g. ``ooa`` will match ``foobar``. + :yields: :class:`.Completion` + """ + return self._search_func(string, complete=True, **kwargs) + + @validate_line_column + def help(self, line=None, column=None): + """ + Used to display a help window to users. Uses :meth:`.Script.goto` and + returns additional definitions for keywords and operators. + + Typically you will want to display :meth:`.BaseName.docstring` to the + user for all the returned definitions. + + The additional definitions are ``Name(...).type == 'keyword'``. + These definitions do not have a lot of value apart from their docstring + attribute, which contains the output of Python's :func:`help` function. 
+ + :rtype: list of :class:`.Name` + """ + definitions = self.goto(line, column, follow_imports=True) + if definitions: + return definitions + leaf = self._module_node.get_leaf_for_position((line, column)) + if leaf is not None and leaf.type in ('keyword', 'operator', 'error_leaf'): + def need_pydoc(): + if leaf.value in ('(', ')', '[', ']'): + if leaf.parent.type == 'trailer': + return False + if leaf.parent.type == 'atom': + return False + grammar = self._inference_state.grammar + # This parso stuff is not public, but since I control it, this + # is fine :-) ~dave + reserved = grammar._pgen_grammar.reserved_syntax_strings.keys() + return leaf.value in reserved + + if need_pydoc(): + name = KeywordName(self._inference_state, leaf.value) + return [classes.Name(self._inference_state, name)] + return [] + + @validate_line_column + def get_references(self, line=None, column=None, **kwargs): + """ + Lists all references of a variable in a project. Since this can be + quite hard to do for Jedi, if it is too complicated, Jedi will stop + searching. + + :param include_builtins: Default ``True``. If ``False``, checks if a definition + is a builtin (e.g. ``sys``) and in that case does not return it. + :param scope: Default ``'project'``. If ``'file'``, include references in + the current module only. 
+ :rtype: list of :class:`.Name` + """ + + def _references(include_builtins=True, scope='project'): + if scope not in ('project', 'file'): + raise ValueError('Only the scopes "file" and "project" are allowed') + tree_name = self._module_node.get_name_of_position((line, column)) + if tree_name is None: + # Must be syntax + return [] + + names = find_references(self._get_module_context(), tree_name, scope == 'file') + + definitions = [classes.Name(self._inference_state, n) for n in names] + if not include_builtins or scope == 'file': + definitions = [d for d in definitions if not d.in_builtin_module()] + return helpers.sorted_definitions(definitions) + return _references(**kwargs) + + @validate_line_column + def get_signatures(self, line=None, column=None): + """ + Return the function object of the call under the cursor. + + E.g. if the cursor is here:: + + abs(# <-- cursor is here + + This would return the ``abs`` function. On the other hand:: + + abs()# <-- cursor is here + + This would return an empty list.. + + :rtype: list of :class:`.Signature` + """ + pos = line, column + call_details = helpers.get_signature_details(self._module_node, pos) + if call_details is None: + return [] + + context = self._get_module_context().create_context(call_details.bracket_leaf) + definitions = helpers.cache_signatures( + self._inference_state, + context, + call_details.bracket_leaf, + self._code_lines, + pos + ) + debug.speed('func_call followed') + + # TODO here we use stubs instead of the actual values. We should use + # the signatures from stubs, but the actual values, probably?! + return [classes.Signature(self._inference_state, signature, call_details) + for signature in definitions.get_signatures()] + + @validate_line_column + def get_context(self, line=None, column=None): + """ + Returns the scope context under the cursor. This basically means the + function, class or module where the cursor is at. 
+ + :rtype: :class:`.Name` + """ + pos = (line, column) + leaf = self._module_node.get_leaf_for_position(pos, include_prefixes=True) + if leaf.start_pos > pos or leaf.type == 'endmarker': + previous_leaf = leaf.get_previous_leaf() + if previous_leaf is not None: + leaf = previous_leaf + + module_context = self._get_module_context() + + n = tree.search_ancestor(leaf, 'funcdef', 'classdef') + if n is not None and n.start_pos < pos <= n.children[-1].start_pos: + # This is a bit of a special case. The context of a function/class + # name/param/keyword is always it's parent context, not the + # function itself. Catch all the cases here where we are before the + # suite object, but still in the function. + context = module_context.create_value(n).as_context() + else: + context = module_context.create_context(leaf) + + while context.name is None: + context = context.parent_context # comprehensions + + definition = classes.Name(self._inference_state, context.name) + while definition.type != 'module': + name = definition._name # TODO private access + tree_name = name.tree_name + if tree_name is not None: # Happens with lambdas. + scope = tree_name.get_definition() + if scope.start_pos[1] < column: + break + definition = definition.parent() + return definition + + def _analysis(self): + self._inference_state.is_analysis = True + self._inference_state.analysis_modules = [self._module_node] + module = self._get_module_context() + try: + for node in get_executable_nodes(self._module_node): + context = module.create_context(node) + if node.type in ('funcdef', 'classdef'): + # Resolve the decorators. 
+ tree_name_to_values(self._inference_state, context, node.children[1]) + elif isinstance(node, tree.Import): + import_names = set(node.get_defined_names()) + if node.is_nested(): + import_names |= set(path[-1] for path in node.get_paths()) + for n in import_names: + imports.infer_import(context, n) + elif node.type == 'expr_stmt': + types = context.infer_node(node) + for testlist in node.children[:-1:2]: + # Iterate tuples. + unpack_tuple_to_dict(context, types, testlist) + else: + if node.type == 'name': + defs = self._inference_state.infer(context, node) + else: + defs = infer_call_of_leaf(context, node) + try_iter_content(defs) + self._inference_state.reset_recursion_limitations() + + ana = [a for a in self._inference_state.analysis if self.path == a.path] + return sorted(set(ana), key=lambda x: x.line) + finally: + self._inference_state.is_analysis = False + + def get_names(self, **kwargs): + """ + Returns names defined in the current file. + + :param all_scopes: If True lists the names of all scopes instead of + only the module namespace. + :param definitions: If True lists the names that have been defined by a + class, function or a statement (``a = b`` returns ``a``). + :param references: If True lists all the names that are not listed by + ``definitions=True``. E.g. ``a = b`` returns ``b``. + :rtype: list of :class:`.Name` + """ + names = self._names(**kwargs) + return [classes.Name(self._inference_state, n) for n in names] + + def get_syntax_errors(self): + """ + Lists all syntax errors in the current file. + + :rtype: list of :class:`.SyntaxError` + """ + return parso_to_jedi_errors(self._inference_state.grammar, self._module_node) + + def _names(self, all_scopes=False, definitions=True, references=False): + # Set line/column to a random position, because they don't matter. 
+ module_context = self._get_module_context() + defs = [ + module_context.create_name(name) + for name in helpers.get_module_names( + self._module_node, + all_scopes=all_scopes, + definitions=definitions, + references=references, + ) + ] + return sorted(defs, key=lambda x: x.start_pos) + + def rename(self, line=None, column=None, *, new_name): + """ + Renames all references of the variable under the cursor. + + :param new_name: The variable under the cursor will be renamed to this + string. + :raises: :exc:`.RefactoringError` + :rtype: :class:`.Refactoring` + """ + definitions = self.get_references(line, column, include_builtins=False) + return refactoring.rename(self._inference_state, definitions, new_name) + + @validate_line_column + def extract_variable(self, line, column, *, new_name, until_line=None, until_column=None): + """ + Moves an expression to a new statemenet. + + For example if you have the cursor on ``foo`` and provide a + ``new_name`` called ``bar``:: + + foo = 3.1 + x = int(foo + 1) + + the code above will become:: + + foo = 3.1 + bar = foo + 1 + x = int(bar) + + :param new_name: The expression under the cursor will be renamed to + this string. + :param int until_line: The the selection range ends at this line, when + omitted, Jedi will be clever and try to define the range itself. + :param int until_column: The the selection range ends at this column, when + omitted, Jedi will be clever and try to define the range itself. 
+ :raises: :exc:`.RefactoringError` + :rtype: :class:`.Refactoring` + """ + if until_line is None and until_column is None: + until_pos = None + else: + if until_line is None: + until_line = line + if until_column is None: + until_column = len(self._code_lines[until_line - 1]) + until_pos = until_line, until_column + return extract_variable( + self._inference_state, self.path, self._module_node, + new_name, (line, column), until_pos + ) + + @validate_line_column + def extract_function(self, line, column, *, new_name, until_line=None, until_column=None): + """ + Moves an expression to a new function. + + For example if you have the cursor on ``foo`` and provide a + ``new_name`` called ``bar``:: + + global_var = 3 + + def x(): + foo = 3.1 + x = int(foo + 1 + global_var) + + the code above will become:: + + global_var = 3 + + def bar(foo): + return int(foo + 1 + global_var) + + def x(): + foo = 3.1 + x = bar(foo) + + :param new_name: The expression under the cursor will be replaced with + a function with this name. + :param int until_line: The the selection range ends at this line, when + omitted, Jedi will be clever and try to define the range itself. + :param int until_column: The the selection range ends at this column, when + omitted, Jedi will be clever and try to define the range itself. + :raises: :exc:`.RefactoringError` + :rtype: :class:`.Refactoring` + """ + if until_line is None and until_column is None: + until_pos = None + else: + if until_line is None: + until_line = line + if until_column is None: + until_column = len(self._code_lines[until_line - 1]) + until_pos = until_line, until_column + return extract_function( + self._inference_state, self.path, self._get_module_context(), + new_name, (line, column), until_pos + ) + + def inline(self, line=None, column=None): + """ + Inlines a variable under the cursor. This is basically the opposite of + extracting a variable. 
For example with the cursor on bar:: + + foo = 3.1 + bar = foo + 1 + x = int(bar) + + the code above will become:: + + foo = 3.1 + x = int(foo + 1) + + :raises: :exc:`.RefactoringError` + :rtype: :class:`.Refactoring` + """ + names = [d._name for d in self.get_references(line, column, include_builtins=True)] + return refactoring.inline(self._inference_state, names) + + +class Interpreter(Script): + """ + Jedi's API for Python REPLs. + + Implements all of the methods that are present in :class:`.Script` as well. + + In addition to completions that normal REPL completion does like + ``str.upper``, Jedi also supports code completion based on static code + analysis. For example Jedi will complete ``str().upper``. + + >>> from os.path import join + >>> namespace = locals() + >>> script = Interpreter('join("").up', [namespace]) + >>> print(script.complete()[0].name) + upper + + All keyword arguments are same as the arguments for :class:`.Script`. + + :param str code: Code to parse. + :type namespaces: typing.List[dict] + :param namespaces: A list of namespace dictionaries such as the one + returned by :func:`globals` and :func:`locals`. 
+ """ + _allow_descriptor_getattr_default = True + + def __init__(self, code, namespaces, *, project=None, **kwds): + try: + namespaces = [dict(n) for n in namespaces] + except Exception: + raise TypeError("namespaces must be a non-empty list of dicts.") + + environment = kwds.get('environment', None) + if environment is None: + environment = InterpreterEnvironment() + else: + if not isinstance(environment, InterpreterEnvironment): + raise TypeError("The environment needs to be an InterpreterEnvironment subclass.") + + if project is None: + project = Project(Path.cwd()) + + super().__init__(code, environment=environment, project=project, **kwds) + + self.namespaces = namespaces + self._inference_state.allow_descriptor_getattr = self._allow_descriptor_getattr_default + + @cache.memoize_method + def _get_module_context(self): + if self.path is None: + file_io = None + else: + file_io = KnownContentFileIO(self.path, self._code) + tree_module_value = ModuleValue( + self._inference_state, self._module_node, + file_io=file_io, + string_names=('__main__',), + code_lines=self._code_lines, + ) + return interpreter.MixedModuleContext( + tree_module_value, + self.namespaces, + ) + + +def preload_module(*modules): + """ + Preloading modules tells Jedi to load a module now, instead of lazy parsing + of modules. This can be useful for IDEs, to control which modules to load + on startup. + + :param modules: different module names, list of string. + """ + for m in modules: + s = "import %s as x; x." % m + Script(s).complete(1, len(s)) + + +def set_debug_function(func_cb=debug.print_to_stdout, warnings=True, + notices=True, speed=True): + """ + Define a callback debug function to get all the debug messages. + + If you don't specify any arguments, debug messages will be printed to stdout. + + :param func_cb: The callback function for debug messages. 
+ """ + debug.debug_function = func_cb + debug.enable_warning = warnings + debug.enable_notice = notices + debug.enable_speed = speed diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/api/classes.py b/bundle/jedi-vim/pythonx/jedi/jedi/api/classes.py new file mode 100644 index 000000000..ee741c33f --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/api/classes.py @@ -0,0 +1,896 @@ +""" +There are a couple of classes documented in here: + +- :class:`.BaseName` as an abstact base class for almost everything. +- :class:`.Name` used in a lot of places +- :class:`.Completion` for completions +- :class:`.BaseSignature` as a base class for signatures +- :class:`.Signature` for :meth:`.Script.get_signatures` only +- :class:`.ParamName` used for parameters of signatures +- :class:`.Refactoring` for refactorings +- :class:`.SyntaxError` for :meth:`.Script.get_syntax_errors` only + +These classes are the much biggest part of the API, because they contain +the interesting information about all operations. +""" +import re +from pathlib import Path +from typing import Optional + +from parso.tree import search_ancestor + +from jedi import settings +from jedi import debug +from jedi.inference.utils import unite +from jedi.cache import memoize_method +from jedi.inference.compiled.mixed import MixedName +from jedi.inference.names import ImportName, SubModuleName +from jedi.inference.gradual.stub_value import StubModuleValue +from jedi.inference.gradual.conversion import convert_names, convert_values +from jedi.inference.base_value import ValueSet, HasNoContext +from jedi.api.keywords import KeywordName +from jedi.api import completion_cache +from jedi.api.helpers import filter_follow_imports + + +def _sort_names_by_start_pos(names): + return sorted(names, key=lambda s: s.start_pos or (0, 0)) + + +def defined_names(inference_state, value): + """ + List sub-definitions (e.g., methods in class). 
+ + :type scope: Scope + :rtype: list of Name + """ + try: + context = value.as_context() + except HasNoContext: + return [] + filter = next(context.get_filters()) + names = [name for name in filter.values()] + return [Name(inference_state, n) for n in _sort_names_by_start_pos(names)] + + +def _values_to_definitions(values): + return [Name(c.inference_state, c.name) for c in values] + + +class BaseName: + """ + The base class for all definitions, completions and signatures. + """ + _mapping = { + 'posixpath': 'os.path', + 'riscospath': 'os.path', + 'ntpath': 'os.path', + 'os2emxpath': 'os.path', + 'macpath': 'os.path', + 'genericpath': 'os.path', + 'posix': 'os', + '_io': 'io', + '_functools': 'functools', + '_collections': 'collections', + '_socket': 'socket', + '_sqlite3': 'sqlite3', + } + + _tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in { + 'argparse._ActionsContainer': 'argparse.ArgumentParser', + }.items()) + + def __init__(self, inference_state, name): + self._inference_state = inference_state + self._name = name + """ + An instance of :class:`parso.python.tree.Name` subclass. + """ + self.is_keyword = isinstance(self._name, KeywordName) + + @memoize_method + def _get_module_context(self): + # This can take a while to complete, because in the worst case of + # imports (consider `import a` completions), we need to load all + # modules starting with a first. + return self._name.get_root_context() + + @property + def module_path(self) -> Optional[Path]: + """ + Shows the file path of a module. e.g. ``/usr/lib/python3.9/os.py`` + """ + module = self._get_module_context() + if module.is_stub() or not module.is_compiled(): + # Compiled modules should not return a module path even if they + # have one. + path: Optional[Path] = self._get_module_context().py__file__() + if path is not None: + return path + + return None + + @property + def name(self): + """ + Name of variable/function/class/module. + + For example, for ``x = None`` it returns ``'x'``. 
+ + :rtype: str or None + """ + return self._name.get_public_name() + + @property + def type(self): + """ + The type of the definition. + + Here is an example of the value of this attribute. Let's consider + the following source. As what is in ``variable`` is unambiguous + to Jedi, :meth:`jedi.Script.infer` should return a list of + definition for ``sys``, ``f``, ``C`` and ``x``. + + >>> from jedi import Script + >>> source = ''' + ... import keyword + ... + ... class C: + ... pass + ... + ... class D: + ... pass + ... + ... x = D() + ... + ... def f(): + ... pass + ... + ... for variable in [keyword, f, C, x]: + ... variable''' + + >>> script = Script(source) + >>> defs = script.infer() + + Before showing what is in ``defs``, let's sort it by :attr:`line` + so that it is easy to relate the result to the source code. + + >>> defs = sorted(defs, key=lambda d: d.line) + >>> print(defs) # doctest: +NORMALIZE_WHITESPACE + [, + , + , + ] + + Finally, here is what you can get from :attr:`type`: + + >>> defs = [d.type for d in defs] + >>> defs[0] + 'module' + >>> defs[1] + 'class' + >>> defs[2] + 'instance' + >>> defs[3] + 'function' + + Valid values for type are ``module``, ``class``, ``instance``, ``function``, + ``param``, ``path``, ``keyword``, ``property`` and ``statement``. + + """ + tree_name = self._name.tree_name + resolve = False + if tree_name is not None: + # TODO move this to their respective names. + definition = tree_name.get_definition() + if definition is not None and definition.type == 'import_from' and \ + tree_name.is_definition(): + resolve = True + + if isinstance(self._name, SubModuleName) or resolve: + for value in self._name.infer(): + return value.api_type + return self._name.api_type + + @property + def module_name(self): + """ + The module name, a bit similar to what ``__name__`` is in a random + Python module. 
+ + >>> from jedi import Script + >>> source = 'import json' + >>> script = Script(source, path='example.py') + >>> d = script.infer()[0] + >>> print(d.module_name) # doctest: +ELLIPSIS + json + """ + return self._get_module_context().py__name__() + + def in_builtin_module(self): + """ + Returns True, if this is a builtin module. + """ + value = self._get_module_context().get_value() + if isinstance(value, StubModuleValue): + return any(v.is_compiled() for v in value.non_stub_value_set) + return value.is_compiled() + + @property + def line(self): + """The line where the definition occurs (starting with 1).""" + start_pos = self._name.start_pos + if start_pos is None: + return None + return start_pos[0] + + @property + def column(self): + """The column where the definition occurs (starting with 0).""" + start_pos = self._name.start_pos + if start_pos is None: + return None + return start_pos[1] + + def get_definition_start_position(self): + """ + The (row, column) of the start of the definition range. Rows start with + 1, columns start with 0. + + :rtype: Optional[Tuple[int, int]] + """ + if self._name.tree_name is None: + return None + definition = self._name.tree_name.get_definition() + if definition is None: + return self._name.start_pos + return definition.start_pos + + def get_definition_end_position(self): + """ + The (row, column) of the end of the definition range. Rows start with + 1, columns start with 0. + + :rtype: Optional[Tuple[int, int]] + """ + if self._name.tree_name is None: + return None + definition = self._name.tree_name.get_definition() + if definition is None: + return self._name.tree_name.end_pos + if self.type in ("function", "class"): + last_leaf = definition.get_last_leaf() + if last_leaf.type == "newline": + return last_leaf.get_previous_leaf().end_pos + return last_leaf.end_pos + return definition.end_pos + + def docstring(self, raw=False, fast=True): + r""" + Return a document string for this completion object. 
+ + Example: + + >>> from jedi import Script + >>> source = '''\ + ... def f(a, b=1): + ... "Document for function f." + ... ''' + >>> script = Script(source, path='example.py') + >>> doc = script.infer(1, len('def f'))[0].docstring() + >>> print(doc) + f(a, b=1) + + Document for function f. + + Notice that useful extra information is added to the actual + docstring, e.g. function signatures are prepended to their docstrings. + If you need the actual docstring, use ``raw=True`` instead. + + >>> print(script.infer(1, len('def f'))[0].docstring(raw=True)) + Document for function f. + + :param fast: Don't follow imports that are only one level deep like + ``import foo``, but follow ``from foo import bar``. This makes + sense for speed reasons. Completing `import a` is slow if you use + the ``foo.docstring(fast=False)`` on every object, because it + parses all libraries starting with ``a``. + """ + if isinstance(self._name, ImportName) and fast: + return '' + doc = self._get_docstring() + if raw: + return doc + + signature_text = self._get_docstring_signature() + if signature_text and doc: + return signature_text + '\n\n' + doc + else: + return signature_text + doc + + def _get_docstring(self): + return self._name.py__doc__() + + def _get_docstring_signature(self): + return '\n'.join( + signature.to_string() + for signature in self._get_signatures(for_docstring=True) + ) + + @property + def description(self): + """ + A description of the :class:`.Name` object, which is heavily used + in testing. e.g. for ``isinstance`` it returns ``def isinstance``. + + Example: + + >>> from jedi import Script + >>> source = ''' + ... def f(): + ... pass + ... + ... class C: + ... pass + ... + ... 
variable = f if random.choice([0,1]) else C''' + >>> script = Script(source) # line is maximum by default + >>> defs = script.infer(column=3) + >>> defs = sorted(defs, key=lambda d: d.line) + >>> print(defs) # doctest: +NORMALIZE_WHITESPACE + [, + ] + >>> str(defs[0].description) + 'def f' + >>> str(defs[1].description) + 'class C' + + """ + typ = self.type + tree_name = self._name.tree_name + if typ == 'param': + return typ + ' ' + self._name.to_string() + if typ in ('function', 'class', 'module', 'instance') or tree_name is None: + if typ == 'function': + # For the description we want a short and a pythonic way. + typ = 'def' + return typ + ' ' + self._name.get_public_name() + + definition = tree_name.get_definition(include_setitem=True) or tree_name + # Remove the prefix, because that's not what we want for get_code + # here. + txt = definition.get_code(include_prefix=False) + # Delete comments: + txt = re.sub(r'#[^\n]+\n', ' ', txt) + # Delete multi spaces/newlines + txt = re.sub(r'\s+', ' ', txt).strip() + return txt + + @property + def full_name(self): + """ + Dot-separated path of this object. + + It is in the form of ``[.[...]][.]``. + It is useful when you want to look up Python manual of the + object at hand. + + Example: + + >>> from jedi import Script + >>> source = ''' + ... import os + ... os.path.join''' + >>> script = Script(source, path='example.py') + >>> print(script.infer(3, len('os.path.join'))[0].full_name) + os.path.join + + Notice that it returns ``'os.path.join'`` instead of (for example) + ``'posixpath.join'``. This is not correct, since the modules name would + be `````. However most users find the latter + more practical. 
+ """ + if not self._name.is_value_name: + return None + + names = self._name.get_qualified_names(include_module_names=True) + if names is None: + return None + + names = list(names) + try: + names[0] = self._mapping[names[0]] + except KeyError: + pass + + return '.'.join(names) + + def is_stub(self): + """ + Returns True if the current name is defined in a stub file. + """ + if not self._name.is_value_name: + return False + + return self._name.get_root_context().is_stub() + + def is_side_effect(self): + """ + Checks if a name is defined as ``self.foo = 3``. In case of self, this + function would return False, for foo it would return True. + """ + tree_name = self._name.tree_name + if tree_name is None: + return False + return tree_name.is_definition() and tree_name.parent.type == 'trailer' + + @debug.increase_indent_cm('goto on name') + def goto(self, *, follow_imports=False, follow_builtin_imports=False, + only_stubs=False, prefer_stubs=False): + + """ + Like :meth:`.Script.goto` (also supports the same params), but does it + for the current name. This is typically useful if you are using + something like :meth:`.Script.get_names()`. + + :param follow_imports: The goto call will follow imports. + :param follow_builtin_imports: If follow_imports is True will try to + look up names in builtins (i.e. compiled or extension modules). + :param only_stubs: Only return stubs for this goto call. + :param prefer_stubs: Prefer stubs to Python objects for this goto call. 
+ :rtype: list of :class:`Name` + """ + if not self._name.is_value_name: + return [] + + names = self._name.goto() + if follow_imports: + names = filter_follow_imports(names, follow_builtin_imports) + names = convert_names( + names, + only_stubs=only_stubs, + prefer_stubs=prefer_stubs, + ) + return [self if n == self._name else Name(self._inference_state, n) + for n in names] + + @debug.increase_indent_cm('infer on name') + def infer(self, *, only_stubs=False, prefer_stubs=False): + """ + Like :meth:`.Script.infer`, it can be useful to understand which type + the current name has. + + Return the actual definitions. I strongly recommend not using it for + your completions, because it might slow down |jedi|. If you want to + read only a few objects (<=20), it might be useful, especially to get + the original docstrings. The basic problem of this function is that it + follows all results. This means with 1000 completions (e.g. numpy), + it's just very, very slow. + + :param only_stubs: Only return stubs for this goto call. + :param prefer_stubs: Prefer stubs to Python objects for this type + inference call. + :rtype: list of :class:`Name` + """ + assert not (only_stubs and prefer_stubs) + + if not self._name.is_value_name: + return [] + + # First we need to make sure that we have stub names (if possible) that + # we can follow. If we don't do that, we can end up with the inferred + # results of Python objects instead of stubs. + names = convert_names([self._name], prefer_stubs=True) + values = convert_values( + ValueSet.from_sets(n.infer() for n in names), + only_stubs=only_stubs, + prefer_stubs=prefer_stubs, + ) + resulting_names = [c.name for c in values] + return [self if n == self._name else Name(self._inference_state, n) + for n in resulting_names] + + def parent(self): + """ + Returns the parent scope of this identifier. 
+ + :rtype: Name + """ + if not self._name.is_value_name: + return None + + if self.type in ('function', 'class', 'param') and self._name.tree_name is not None: + # Since the parent_context doesn't really match what the user + # thinks of that the parent is here, we do these cases separately. + # The reason for this is the following: + # - class: Nested classes parent_context is always the + # parent_context of the most outer one. + # - function: Functions in classes have the module as + # parent_context. + # - param: The parent_context of a param is not its function but + # e.g. the outer class or module. + cls_or_func_node = self._name.tree_name.get_definition() + parent = search_ancestor(cls_or_func_node, 'funcdef', 'classdef', 'file_input') + context = self._get_module_context().create_value(parent).as_context() + else: + context = self._name.parent_context + + if context is None: + return None + while context.name is None: + # Happens for comprehension contexts + context = context.parent_context + + return Name(self._inference_state, context.name) + + def __repr__(self): + return "<%s %sname=%r, description=%r>" % ( + self.__class__.__name__, + 'full_' if self.full_name else '', + self.full_name or self.name, + self.description, + ) + + def get_line_code(self, before=0, after=0): + """ + Returns the line of code where this object was defined. + + :param before: Add n lines before the current line to the output. + :param after: Add n lines after the current line to the output. + + :return str: Returns the line(s) of code or an empty string if it's a + builtin. + """ + if not self._name.is_value_name: + return '' + + lines = self._name.get_root_context().code_lines + if lines is None: + # Probably a builtin module, just ignore in that case. 
+ return '' + + index = self._name.start_pos[0] - 1 + start_index = max(index - before, 0) + return ''.join(lines[start_index:index + after + 1]) + + def _get_signatures(self, for_docstring=False): + if self._name.api_type == 'property': + return [] + if for_docstring and self._name.api_type == 'statement' and not self.is_stub(): + # For docstrings we don't resolve signatures if they are simple + # statements and not stubs. This is a speed optimization. + return [] + + if isinstance(self._name, MixedName): + # While this would eventually happen anyway, it's basically just a + # shortcut to not infer anything tree related, because it's really + # not necessary. + return self._name.infer_compiled_value().get_signatures() + + names = convert_names([self._name], prefer_stubs=True) + return [sig for name in names for sig in name.infer().get_signatures()] + + def get_signatures(self): + """ + Returns all potential signatures for a function or a class. Multiple + signatures are typical if you use Python stubs with ``@overload``. + + :rtype: list of :class:`BaseSignature` + """ + return [ + BaseSignature(self._inference_state, s) + for s in self._get_signatures() + ] + + def execute(self): + """ + Uses type inference to "execute" this identifier and returns the + executed objects. + + :rtype: list of :class:`Name` + """ + return _values_to_definitions(self._name.infer().execute_with_values()) + + def get_type_hint(self): + """ + Returns type hints like ``Iterable[int]`` or ``Union[int, str]``. + + This method might be quite slow, especially for functions. The problem + is finding executions for those functions to return something like + ``Callable[[int, str], str]``. + + :rtype: str + """ + return self._name.infer().get_type_hint() + + +class Completion(BaseName): + """ + ``Completion`` objects are returned from :meth:`.Script.complete`. They + provide additional information about a completion. 
+ """ + def __init__(self, inference_state, name, stack, like_name_length, + is_fuzzy, cached_name=None): + super().__init__(inference_state, name) + + self._like_name_length = like_name_length + self._stack = stack + self._is_fuzzy = is_fuzzy + self._cached_name = cached_name + + # Completion objects with the same Completion name (which means + # duplicate items in the completion) + self._same_name_completions = [] + + def _complete(self, like_name): + append = '' + if settings.add_bracket_after_function \ + and self.type == 'function': + append = '(' + + name = self._name.get_public_name() + if like_name: + name = name[self._like_name_length:] + return name + append + + @property + def complete(self): + """ + Only works with non-fuzzy completions. Returns None if fuzzy + completions are used. + + Return the rest of the word, e.g. completing ``isinstance``:: + + isinstan# <-- Cursor is here + + would return the string 'ce'. It also adds additional stuff, depending + on your ``settings.py``. + + Assuming the following function definition:: + + def foo(param=0): + pass + + completing ``foo(par`` would give a ``Completion`` which ``complete`` + would be ``am=``. + """ + if self._is_fuzzy: + return None + return self._complete(True) + + @property + def name_with_symbols(self): + """ + Similar to :attr:`.name`, but like :attr:`.name` returns also the + symbols, for example assuming the following function definition:: + + def foo(param=0): + pass + + completing ``foo(`` would give a ``Completion`` which + ``name_with_symbols`` would be "param=". + + """ + return self._complete(False) + + def docstring(self, raw=False, fast=True): + """ + Documented under :meth:`BaseName.docstring`. + """ + if self._like_name_length >= 3: + # In this case we can just resolve the like name, because we + # wouldn't load like > 100 Python modules anymore. 
+ fast = False + + return super().docstring(raw=raw, fast=fast) + + def _get_docstring(self): + if self._cached_name is not None: + return completion_cache.get_docstring( + self._cached_name, + self._name.get_public_name(), + lambda: self._get_cache() + ) + return super()._get_docstring() + + def _get_docstring_signature(self): + if self._cached_name is not None: + return completion_cache.get_docstring_signature( + self._cached_name, + self._name.get_public_name(), + lambda: self._get_cache() + ) + return super()._get_docstring_signature() + + def _get_cache(self): + return ( + super().type, + super()._get_docstring_signature(), + super()._get_docstring(), + ) + + @property + def type(self): + """ + Documented under :meth:`BaseName.type`. + """ + # Purely a speed optimization. + if self._cached_name is not None: + return completion_cache.get_type( + self._cached_name, + self._name.get_public_name(), + lambda: self._get_cache() + ) + + return super().type + + def get_completion_prefix_length(self): + """ + Returns the length of the prefix being completed. + For example, completing ``isinstance``:: + + isinstan# <-- Cursor is here + + would return 8, because len('isinstan') == 8. + + Assuming the following function definition:: + + def foo(param=0): + pass + + completing ``foo(par`` would return 3. + """ + return self._like_name_length + + def __repr__(self): + return '<%s: %s>' % (type(self).__name__, self._name.get_public_name()) + + +class Name(BaseName): + """ + *Name* objects are returned from many different APIs including + :meth:`.Script.goto` or :meth:`.Script.infer`. + """ + def __init__(self, inference_state, definition): + super().__init__(inference_state, definition) + + @memoize_method + def defined_names(self): + """ + List sub-definitions (e.g., methods in class). 
+ + :rtype: list of :class:`Name` + """ + defs = self._name.infer() + return sorted( + unite(defined_names(self._inference_state, d) for d in defs), + key=lambda s: s._name.start_pos or (0, 0) + ) + + def is_definition(self): + """ + Returns True, if defined as a name in a statement, function or class. + Returns False, if it's a reference to such a definition. + """ + if self._name.tree_name is None: + return True + else: + return self._name.tree_name.is_definition() + + def __eq__(self, other): + return self._name.start_pos == other._name.start_pos \ + and self.module_path == other.module_path \ + and self.name == other.name \ + and self._inference_state == other._inference_state + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return hash((self._name.start_pos, self.module_path, self.name, self._inference_state)) + + +class BaseSignature(Name): + """ + These signatures are returned by :meth:`BaseName.get_signatures` + calls. + """ + def __init__(self, inference_state, signature): + super().__init__(inference_state, signature.name) + self._signature = signature + + @property + def params(self): + """ + Returns definitions for all parameters that a signature defines. + This includes stuff like ``*args`` and ``**kwargs``. + + :rtype: list of :class:`.ParamName` + """ + return [ParamName(self._inference_state, n) + for n in self._signature.get_param_names(resolve_stars=True)] + + def to_string(self): + """ + Returns a text representation of the signature. This could for example + look like ``foo(bar, baz: int, **kwargs)``. + + :rtype: str + """ + return self._signature.to_string() + + +class Signature(BaseSignature): + """ + A full signature object is the return value of + :meth:`.Script.get_signatures`. 
+    """
+    def __init__(self, inference_state, signature, call_details):
+        super().__init__(inference_state, signature)
+        self._call_details = call_details
+        self._signature = signature
+
+    @property
+    def index(self):
+        """
+        Returns the param index of the current cursor position.
+        Returns None if the index cannot be found in the current call.
+
+        :rtype: int
+        """
+        return self._call_details.calculate_index(
+            self._signature.get_param_names(resolve_stars=True)
+        )
+
+    @property
+    def bracket_start(self):
+        """
+        Returns a line/column tuple of the bracket that is responsible for the
+        last function call. The first line is 1 and the first column 0.
+
+        :rtype: int, int
+        """
+        return self._call_details.bracket_leaf.start_pos
+
+    def __repr__(self):
+        return '<%s: index=%r %s>' % (
+            type(self).__name__,
+            self.index,
+            self._signature.to_string(),
+        )
+
+
+class ParamName(Name):
+    def infer_default(self):
+        """
+        Returns default values like the ``1`` of ``def foo(x=1):``.
+
+        :rtype: list of :class:`.Name`
+        """
+        return _values_to_definitions(self._name.infer_default())
+
+    def infer_annotation(self, **kwargs):
+        """
+        :param execute_annotation: Default True; If False, values are not
+            executed and classes are returned instead of instances.
+        :rtype: list of :class:`.Name`
+        """
+        return _values_to_definitions(self._name.infer_annotation(ignore_stars=True, **kwargs))
+
+    def to_string(self):
+        """
+        Returns a simple representation of a param, like
+        ``f: Callable[..., Any]``.
+
+        :rtype: str
+        """
+        return self._name.to_string()
+
+    @property
+    def kind(self):
+        """
+        Returns an enum instance of :mod:`inspect`'s ``Parameter`` enum.
+ + :rtype: :py:attr:`inspect.Parameter.kind` + """ + return self._name.get_kind() diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/api/completion.py b/bundle/jedi-vim/pythonx/jedi/jedi/api/completion.py new file mode 100644 index 000000000..342f75065 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/api/completion.py @@ -0,0 +1,666 @@ +import re +from textwrap import dedent +from inspect import Parameter + +from parso.python.token import PythonTokenTypes +from parso.python import tree +from parso.tree import search_ancestor, Leaf +from parso import split_lines + +from jedi import debug +from jedi import settings +from jedi.api import classes +from jedi.api import helpers +from jedi.api import keywords +from jedi.api.strings import complete_dict +from jedi.api.file_name import complete_file_name +from jedi.inference import imports +from jedi.inference.base_value import ValueSet +from jedi.inference.helpers import infer_call_of_leaf, parse_dotted_names +from jedi.inference.context import get_global_filters +from jedi.inference.value import TreeInstance +from jedi.inference.docstring_utils import DocstringModule +from jedi.inference.names import ParamNameWrapper, SubModuleName +from jedi.inference.gradual.conversion import convert_values, convert_names +from jedi.parser_utils import cut_value_at_position +from jedi.plugins import plugin_manager + + +class ParamNameWithEquals(ParamNameWrapper): + def get_public_name(self): + return self.string_name + '=' + + +def _get_signature_param_names(signatures, positional_count, used_kwargs): + # Add named params + for call_sig in signatures: + for i, p in enumerate(call_sig.params): + kind = p.kind + if i < positional_count and kind == Parameter.POSITIONAL_OR_KEYWORD: + continue + if kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY) \ + and p.name not in used_kwargs: + yield ParamNameWithEquals(p._name) + + +def _must_be_kwarg(signatures, positional_count, used_kwargs): + if used_kwargs: + return True + + 
must_be_kwarg = True + for signature in signatures: + for i, p in enumerate(signature.params): + kind = p.kind + if kind is Parameter.VAR_POSITIONAL: + # In case there were not already kwargs, the next param can + # always be a normal argument. + return False + + if i >= positional_count and kind in (Parameter.POSITIONAL_OR_KEYWORD, + Parameter.POSITIONAL_ONLY): + must_be_kwarg = False + break + if not must_be_kwarg: + break + return must_be_kwarg + + +def filter_names(inference_state, completion_names, stack, like_name, fuzzy, cached_name): + comp_dct = set() + if settings.case_insensitive_completion: + like_name = like_name.lower() + for name in completion_names: + string = name.string_name + if settings.case_insensitive_completion: + string = string.lower() + if helpers.match(string, like_name, fuzzy=fuzzy): + new = classes.Completion( + inference_state, + name, + stack, + len(like_name), + is_fuzzy=fuzzy, + cached_name=cached_name, + ) + k = (new.name, new.complete) # key + if k not in comp_dct: + comp_dct.add(k) + tree_name = name.tree_name + if tree_name is not None: + definition = tree_name.get_definition() + if definition is not None and definition.type == 'del_stmt': + continue + yield new + + +def _remove_duplicates(completions, other_completions): + names = {d.name for d in other_completions} + return [c for c in completions if c.name not in names] + + +def get_user_context(module_context, position): + """ + Returns the scope in which the user resides. This includes flows. 
+ """ + leaf = module_context.tree_node.get_leaf_for_position(position, include_prefixes=True) + return module_context.create_context(leaf) + + +def get_flow_scope_node(module_node, position): + node = module_node.get_leaf_for_position(position, include_prefixes=True) + while not isinstance(node, (tree.Scope, tree.Flow)): + node = node.parent + + return node + + +@plugin_manager.decorate() +def complete_param_names(context, function_name, decorator_nodes): + # Basically there's no way to do param completion. The plugins are + # responsible for this. + return [] + + +class Completion: + def __init__(self, inference_state, module_context, code_lines, position, + signatures_callback, fuzzy=False): + self._inference_state = inference_state + self._module_context = module_context + self._module_node = module_context.tree_node + self._code_lines = code_lines + + # The first step of completions is to get the name + self._like_name = helpers.get_on_completion_name(self._module_node, code_lines, position) + # The actual cursor position is not what we need to calculate + # everything. We want the start of the name we're on. 
+ self._original_position = position + self._signatures_callback = signatures_callback + + self._fuzzy = fuzzy + + def complete(self): + leaf = self._module_node.get_leaf_for_position( + self._original_position, + include_prefixes=True + ) + string, start_leaf, quote = _extract_string_while_in_string(leaf, self._original_position) + + prefixed_completions = complete_dict( + self._module_context, + self._code_lines, + start_leaf or leaf, + self._original_position, + None if string is None else quote + string, + fuzzy=self._fuzzy, + ) + + if string is not None and not prefixed_completions: + prefixed_completions = list(complete_file_name( + self._inference_state, self._module_context, start_leaf, quote, string, + self._like_name, self._signatures_callback, + self._code_lines, self._original_position, + self._fuzzy + )) + if string is not None: + if not prefixed_completions and '\n' in string: + # Complete only multi line strings + prefixed_completions = self._complete_in_string(start_leaf, string) + return prefixed_completions + + cached_name, completion_names = self._complete_python(leaf) + + completions = list(filter_names(self._inference_state, completion_names, + self.stack, self._like_name, + self._fuzzy, cached_name=cached_name)) + + return ( + # Removing duplicates mostly to remove False/True/None duplicates. + _remove_duplicates(prefixed_completions, completions) + + sorted(completions, key=lambda x: (x.name.startswith('__'), + x.name.startswith('_'), + x.name.lower())) + ) + + def _complete_python(self, leaf): + """ + Analyzes the current context of a completion and decides what to + return. + + Technically this works by generating a parser stack and analysing the + current stack for possible grammar nodes. 
+ + Possible enhancements: + - global/nonlocal search global + - yield from / raise from <- could be only exceptions/generators + - In args: */**: no completion + - In params (also lambda): no completion before = + """ + grammar = self._inference_state.grammar + self.stack = stack = None + self._position = ( + self._original_position[0], + self._original_position[1] - len(self._like_name) + ) + cached_name = None + + try: + self.stack = stack = helpers.get_stack_at_position( + grammar, self._code_lines, leaf, self._position + ) + except helpers.OnErrorLeaf as e: + value = e.error_leaf.value + if value == '.': + # After ErrorLeaf's that are dots, we will not do any + # completions since this probably just confuses the user. + return cached_name, [] + + # If we don't have a value, just use global completion. + return cached_name, self._complete_global_scope() + + allowed_transitions = \ + list(stack._allowed_transition_names_and_token_types()) + + if 'if' in allowed_transitions: + leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True) + previous_leaf = leaf.get_previous_leaf() + + indent = self._position[1] + if not (leaf.start_pos <= self._position <= leaf.end_pos): + indent = leaf.start_pos[1] + + if previous_leaf is not None: + stmt = previous_leaf + while True: + stmt = search_ancestor( + stmt, 'if_stmt', 'for_stmt', 'while_stmt', 'try_stmt', + 'error_node', + ) + if stmt is None: + break + + type_ = stmt.type + if type_ == 'error_node': + first = stmt.children[0] + if isinstance(first, Leaf): + type_ = first.value + '_stmt' + # Compare indents + if stmt.start_pos[1] == indent: + if type_ == 'if_stmt': + allowed_transitions += ['elif', 'else'] + elif type_ == 'try_stmt': + allowed_transitions += ['except', 'finally', 'else'] + elif type_ == 'for_stmt': + allowed_transitions.append('else') + + completion_names = [] + + kwargs_only = False + if any(t in allowed_transitions for t in (PythonTokenTypes.NAME, + PythonTokenTypes.INDENT)): 
+ # This means that we actually have to do type inference. + + nonterminals = [stack_node.nonterminal for stack_node in stack] + + nodes = _gather_nodes(stack) + if nodes and nodes[-1] in ('as', 'def', 'class'): + # No completions for ``with x as foo`` and ``import x as foo``. + # Also true for defining names as a class or function. + return cached_name, list(self._complete_inherited(is_function=True)) + elif "import_stmt" in nonterminals: + level, names = parse_dotted_names(nodes, "import_from" in nonterminals) + + only_modules = not ("import_from" in nonterminals and 'import' in nodes) + completion_names += self._get_importer_names( + names, + level, + only_modules=only_modules, + ) + elif nonterminals[-1] in ('trailer', 'dotted_name') and nodes[-1] == '.': + dot = self._module_node.get_leaf_for_position(self._position) + if dot.type == "endmarker": + # This is a bit of a weird edge case, maybe we can somehow + # generalize this. + dot = leaf.get_previous_leaf() + cached_name, n = self._complete_trailer(dot.get_previous_leaf()) + completion_names += n + elif self._is_parameter_completion(): + completion_names += self._complete_params(leaf) + else: + # Apparently this looks like it's good enough to filter most cases + # so that signature completions don't randomly appear. + # To understand why this works, three things are important: + # 1. trailer with a `,` in it is either a subscript or an arglist. + # 2. If there's no `,`, it's at the start and only signatures start + # with `(`. Other trailers could start with `.` or `[`. + # 3. Decorators are very primitive and have an optional `(` with + # optional arglist in them. 
+ if nodes[-1] in ['(', ','] \ + and nonterminals[-1] in ('trailer', 'arglist', 'decorator'): + signatures = self._signatures_callback(*self._position) + if signatures: + call_details = signatures[0]._call_details + used_kwargs = list(call_details.iter_used_keyword_arguments()) + positional_count = call_details.count_positional_arguments() + + completion_names += _get_signature_param_names( + signatures, + positional_count, + used_kwargs, + ) + + kwargs_only = _must_be_kwarg(signatures, positional_count, used_kwargs) + + if not kwargs_only: + completion_names += self._complete_global_scope() + completion_names += self._complete_inherited(is_function=False) + + if not kwargs_only: + current_line = self._code_lines[self._position[0] - 1][:self._position[1]] + completion_names += self._complete_keywords( + allowed_transitions, + only_values=not (not current_line or current_line[-1] in ' \t.;' + and current_line[-3:] != '...') + ) + + return cached_name, completion_names + + def _is_parameter_completion(self): + tos = self.stack[-1] + if tos.nonterminal == 'lambdef' and len(tos.nodes) == 1: + # We are at the position `lambda `, where basically the next node + # is a param. + return True + if tos.nonterminal in 'parameters': + # Basically we are at the position `foo(`, there's nothing there + # yet, so we have no `typedargslist`. 
+ return True + # var args is for lambdas and typed args for normal functions + return tos.nonterminal in ('typedargslist', 'varargslist') and tos.nodes[-1] == ',' + + def _complete_params(self, leaf): + stack_node = self.stack[-2] + if stack_node.nonterminal == 'parameters': + stack_node = self.stack[-3] + if stack_node.nonterminal == 'funcdef': + context = get_user_context(self._module_context, self._position) + node = search_ancestor(leaf, 'error_node', 'funcdef') + if node is not None: + if node.type == 'error_node': + n = node.children[0] + if n.type == 'decorators': + decorators = n.children + elif n.type == 'decorator': + decorators = [n] + else: + decorators = [] + else: + decorators = node.get_decorators() + function_name = stack_node.nodes[1] + + return complete_param_names(context, function_name.value, decorators) + return [] + + def _complete_keywords(self, allowed_transitions, only_values): + for k in allowed_transitions: + if isinstance(k, str) and k.isalpha(): + if not only_values or k in ('True', 'False', 'None'): + yield keywords.KeywordName(self._inference_state, k) + + def _complete_global_scope(self): + context = get_user_context(self._module_context, self._position) + debug.dbg('global completion scope: %s', context) + flow_scope_node = get_flow_scope_node(self._module_node, self._position) + filters = get_global_filters( + context, + self._position, + flow_scope_node + ) + completion_names = [] + for filter in filters: + completion_names += filter.values() + return completion_names + + def _complete_trailer(self, previous_leaf): + inferred_context = self._module_context.create_context(previous_leaf) + values = infer_call_of_leaf(inferred_context, previous_leaf) + debug.dbg('trailer completion values: %s', values, color='MAGENTA') + + # The cached name simply exists to make speed optimizations for certain + # modules. 
+ cached_name = None + if len(values) == 1: + v, = values + if v.is_module(): + if len(v.string_names) == 1: + module_name = v.string_names[0] + if module_name in ('numpy', 'tensorflow', 'matplotlib', 'pandas'): + cached_name = module_name + + return cached_name, self._complete_trailer_for_values(values) + + def _complete_trailer_for_values(self, values): + user_context = get_user_context(self._module_context, self._position) + + return complete_trailer(user_context, values) + + def _get_importer_names(self, names, level=0, only_modules=True): + names = [n.value for n in names] + i = imports.Importer(self._inference_state, names, self._module_context, level) + return i.completion_names(self._inference_state, only_modules=only_modules) + + def _complete_inherited(self, is_function=True): + """ + Autocomplete inherited methods when overriding in child class. + """ + leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True) + cls = tree.search_ancestor(leaf, 'classdef') + if cls is None: + return + + # Complete the methods that are defined in the super classes. + class_value = self._module_context.create_value(cls) + + if cls.start_pos[1] >= leaf.start_pos[1]: + return + + filters = class_value.get_filters(is_instance=True) + # The first dict is the dictionary of class itself. 
+ next(filters) + for filter in filters: + for name in filter.values(): + # TODO we should probably check here for properties + if (name.api_type == 'function') == is_function: + yield name + + def _complete_in_string(self, start_leaf, string): + """ + To make it possible for people to have completions in doctests or + generally in "Python" code in docstrings, we use the following + heuristic: + + - Having an indented block of code + - Having some doctest code that starts with `>>>` + - Having backticks that doesn't have whitespace inside it + """ + def iter_relevant_lines(lines): + include_next_line = False + for l in code_lines: + if include_next_line or l.startswith('>>>') or l.startswith(' '): + yield re.sub(r'^( *>>> ?| +)', '', l) + else: + yield None + + include_next_line = bool(re.match(' *>>>', l)) + + string = dedent(string) + code_lines = split_lines(string, keepends=True) + relevant_code_lines = list(iter_relevant_lines(code_lines)) + if relevant_code_lines[-1] is not None: + # Some code lines might be None, therefore get rid of that. 
+ relevant_code_lines = ['\n' if c is None else c for c in relevant_code_lines] + return self._complete_code_lines(relevant_code_lines) + match = re.search(r'`([^`\s]+)', code_lines[-1]) + if match: + return self._complete_code_lines([match.group(1)]) + return [] + + def _complete_code_lines(self, code_lines): + module_node = self._inference_state.grammar.parse(''.join(code_lines)) + module_value = DocstringModule( + in_module_context=self._module_context, + inference_state=self._inference_state, + module_node=module_node, + code_lines=code_lines, + ) + return Completion( + self._inference_state, + module_value.as_context(), + code_lines=code_lines, + position=module_node.end_pos, + signatures_callback=lambda *args, **kwargs: [], + fuzzy=self._fuzzy + ).complete() + + +def _gather_nodes(stack): + nodes = [] + for stack_node in stack: + if stack_node.dfa.from_rule == 'small_stmt': + nodes = [] + else: + nodes += stack_node.nodes + return nodes + + +_string_start = re.compile(r'^\w*(\'{3}|"{3}|\'|")') + + +def _extract_string_while_in_string(leaf, position): + def return_part_of_leaf(leaf): + kwargs = {} + if leaf.line == position[0]: + kwargs['endpos'] = position[1] - leaf.column + match = _string_start.match(leaf.value, **kwargs) + if not match: + return None, None, None + start = match.group(0) + if leaf.line == position[0] and position[1] < leaf.column + match.end(): + return None, None, None + return cut_value_at_position(leaf, position)[match.end():], leaf, start + + if position < leaf.start_pos: + return None, None, None + + if leaf.type == 'string': + return return_part_of_leaf(leaf) + + leaves = [] + while leaf is not None: + if leaf.type == 'error_leaf' and ('"' in leaf.value or "'" in leaf.value): + if len(leaf.value) > 1: + return return_part_of_leaf(leaf) + prefix_leaf = None + if not leaf.prefix: + prefix_leaf = leaf.get_previous_leaf() + if prefix_leaf is None or prefix_leaf.type != 'name' \ + or not all(c in 'rubf' for c in prefix_leaf.value.lower()): 
prefix_leaf = None
+
+            return (
+                ''.join(cut_value_at_position(l, position) for l in leaves),
+                prefix_leaf or leaf,
+                ('' if prefix_leaf is None else prefix_leaf.value)
+                + cut_value_at_position(leaf, position),
+            )
+        if leaf.line != position[0]:
+            # Multi line strings are always simple error leaves and contain the
+            # whole string, single line error leaves are therefore important
+            # now and since the line is different, it's not really a single
+            # line string anymore.
+            break
+        leaves.insert(0, leaf)
+        leaf = leaf.get_previous_leaf()
+    return None, None, None
+
+
+def complete_trailer(user_context, values):
+    completion_names = []
+    for value in values:
+        for filter in value.get_filters(origin_scope=user_context.tree_node):
+            completion_names += filter.values()
+
+        if not value.is_stub() and isinstance(value, TreeInstance):
+            completion_names += _complete_getattr(user_context, value)
+
+    python_values = convert_values(values)
+    for c in python_values:
+        if c not in values:
+            for filter in c.get_filters(origin_scope=user_context.tree_node):
+                completion_names += filter.values()
+    return completion_names
+
+
+def _complete_getattr(user_context, instance):
+    """
+    A heuristic to make completion for proxy objects work. This is not
+    intended to work in all cases. It works exactly in this case:
+
+        def __getattr__(self, name):
+            ...
+            return getattr(any_object, name)
+
+    It is important that the return contains getattr directly, otherwise it
+    won't work anymore. It's really just a stupid heuristic. It will not
+    work if you write e.g. `return (getatr(o, name))`, because of the
+    additional parentheses. It will also not work if you move the getattr
+    to some other place that is not the return statement itself.
+
+    It is intentional that it doesn't work in all cases. Generally it's
+    really hard to do even this case (as you can see below). Most people
+    will write it like this anyway and the other ones, well they are just
+    out of luck I guess :) ~dave.
+ """ + names = (instance.get_function_slot_names('__getattr__') + or instance.get_function_slot_names('__getattribute__')) + functions = ValueSet.from_sets( + name.infer() + for name in names + ) + for func in functions: + tree_node = func.tree_node + if tree_node is None or tree_node.type != 'funcdef': + continue + + for return_stmt in tree_node.iter_return_stmts(): + # Basically until the next comment we just try to find out if a + # return statement looks exactly like `return getattr(x, name)`. + if return_stmt.type != 'return_stmt': + continue + atom_expr = return_stmt.children[1] + if atom_expr.type != 'atom_expr': + continue + atom = atom_expr.children[0] + trailer = atom_expr.children[1] + if len(atom_expr.children) != 2 or atom.type != 'name' \ + or atom.value != 'getattr': + continue + arglist = trailer.children[1] + if arglist.type != 'arglist' or len(arglist.children) < 3: + continue + context = func.as_context() + object_node = arglist.children[0] + + # Make sure it's a param: foo in __getattr__(self, foo) + name_node = arglist.children[2] + name_list = context.goto(name_node, name_node.start_pos) + if not any(n.api_type == 'param' for n in name_list): + continue + + # Now that we know that these are most probably completion + # objects, we just infer the object and return them as + # completions. 
+ objects = context.infer_node(object_node) + return complete_trailer(user_context, objects) + return [] + + +def search_in_module(inference_state, module_context, names, wanted_names, + wanted_type, complete=False, fuzzy=False, + ignore_imports=False, convert=False): + for s in wanted_names[:-1]: + new_names = [] + for n in names: + if s == n.string_name: + if n.tree_name is not None and n.api_type in ('module', 'namespace') \ + and ignore_imports: + continue + new_names += complete_trailer( + module_context, + n.infer() + ) + debug.dbg('dot lookup on search %s from %s', new_names, names[:10]) + names = new_names + + last_name = wanted_names[-1].lower() + for n in names: + string = n.string_name.lower() + if complete and helpers.match(string, last_name, fuzzy=fuzzy) \ + or not complete and string == last_name: + if isinstance(n, SubModuleName): + names = [v.name for v in n.infer()] + else: + names = [n] + if convert: + names = convert_names(names) + for n2 in names: + if complete: + def_ = classes.Completion( + inference_state, n2, + stack=None, + like_name_length=len(last_name), + is_fuzzy=fuzzy, + ) + else: + def_ = classes.Name(inference_state, n2) + if not wanted_type or wanted_type == def_.type: + yield def_ diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/api/completion_cache.py b/bundle/jedi-vim/pythonx/jedi/jedi/api/completion_cache.py new file mode 100644 index 000000000..46e9bead2 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/api/completion_cache.py @@ -0,0 +1,31 @@ +from typing import Dict, Tuple, Callable + +CacheValues = Tuple[str, str, str] +CacheValuesCallback = Callable[[], CacheValues] + + +_cache: Dict[str, Dict[str, CacheValues]] = {} + + +def save_entry(module_name: str, name: str, cache: CacheValues) -> None: + try: + module_cache = _cache[module_name] + except KeyError: + module_cache = _cache[module_name] = {} + module_cache[name] = cache + + +def _create_get_from_cache(number: int) -> Callable[[str, str, CacheValuesCallback], str]: + 
def _get_from_cache(module_name: str, name: str, get_cache_values: CacheValuesCallback) -> str: + try: + return _cache[module_name][name][number] + except KeyError: + v = get_cache_values() + save_entry(module_name, name, v) + return v[number] + return _get_from_cache + + +get_type = _create_get_from_cache(0) +get_docstring_signature = _create_get_from_cache(1) +get_docstring = _create_get_from_cache(2) diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/api/environment.py b/bundle/jedi-vim/pythonx/jedi/jedi/api/environment.py new file mode 100644 index 000000000..aea96c476 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/api/environment.py @@ -0,0 +1,467 @@ +""" +Environments are a way to activate different Python versions or Virtualenvs for +static analysis. The Python binary in that environment is going to be executed. +""" +import os +import sys +import hashlib +import filecmp +from collections import namedtuple +from shutil import which + +from jedi.cache import memoize_method, time_cache +from jedi.inference.compiled.subprocess import CompiledSubprocess, \ + InferenceStateSameProcess, InferenceStateSubprocess + +import parso + +_VersionInfo = namedtuple('VersionInfo', 'major minor micro') + +_SUPPORTED_PYTHONS = ['3.10', '3.9', '3.8', '3.7', '3.6'] +_SAFE_PATHS = ['/usr/bin', '/usr/local/bin'] +_CONDA_VAR = 'CONDA_PREFIX' +_CURRENT_VERSION = '%s.%s' % (sys.version_info.major, sys.version_info.minor) + + +class InvalidPythonEnvironment(Exception): + """ + If you see this exception, the Python executable or Virtualenv you have + been trying to use is probably not a correct Python version. 
+ """ + + +class _BaseEnvironment: + @memoize_method + def get_grammar(self): + version_string = '%s.%s' % (self.version_info.major, self.version_info.minor) + return parso.load_grammar(version=version_string) + + @property + def _sha256(self): + try: + return self._hash + except AttributeError: + self._hash = _calculate_sha256_for_file(self.executable) + return self._hash + + +def _get_info(): + return ( + sys.executable, + sys.prefix, + sys.version_info[:3], + ) + + +class Environment(_BaseEnvironment): + """ + This class is supposed to be created by internal Jedi architecture. You + should not create it directly. Please use create_environment or the other + functions instead. It is then returned by that function. + """ + _subprocess = None + + def __init__(self, executable, env_vars=None): + self._start_executable = executable + self._env_vars = env_vars + # Initialize the environment + self._get_subprocess() + + def _get_subprocess(self): + if self._subprocess is not None and not self._subprocess.is_crashed: + return self._subprocess + + try: + self._subprocess = CompiledSubprocess(self._start_executable, + env_vars=self._env_vars) + info = self._subprocess._send(None, _get_info) + except Exception as exc: + raise InvalidPythonEnvironment( + "Could not get version information for %r: %r" % ( + self._start_executable, + exc)) + + # Since it could change and might not be the same(?) as the one given, + # set it here. + self.executable = info[0] + """ + The Python executable, matches ``sys.executable``. + """ + self.path = info[1] + """ + The path to an environment, matches ``sys.prefix``. + """ + self.version_info = _VersionInfo(*info[2]) + """ + Like :data:`sys.version_info`: a tuple to show the current + Environment's Python version. 
+ """ + return self._subprocess + + def __repr__(self): + version = '.'.join(str(i) for i in self.version_info) + return '<%s: %s in %s>' % (self.__class__.__name__, version, self.path) + + def get_inference_state_subprocess(self, inference_state): + return InferenceStateSubprocess(inference_state, self._get_subprocess()) + + @memoize_method + def get_sys_path(self): + """ + The sys path for this environment. Does not include potential + modifications from e.g. appending to :data:`sys.path`. + + :returns: list of str + """ + # It's pretty much impossible to generate the sys path without actually + # executing Python. The sys path (when starting with -S) itself depends + # on how the Python version was compiled (ENV variables). + # If you omit -S when starting Python (normal case), additionally + # site.py gets executed. + return self._get_subprocess().get_sys_path() + + +class _SameEnvironmentMixin: + def __init__(self): + self._start_executable = self.executable = sys.executable + self.path = sys.prefix + self.version_info = _VersionInfo(*sys.version_info[:3]) + self._env_vars = None + + +class SameEnvironment(_SameEnvironmentMixin, Environment): + pass + + +class InterpreterEnvironment(_SameEnvironmentMixin, _BaseEnvironment): + def get_inference_state_subprocess(self, inference_state): + return InferenceStateSameProcess(inference_state) + + def get_sys_path(self): + return sys.path + + +def _get_virtual_env_from_var(env_var='VIRTUAL_ENV'): + """Get virtualenv environment from VIRTUAL_ENV environment variable. + + It uses `safe=False` with ``create_environment``, because the environment + variable is considered to be safe / controlled by the user solely. + """ + var = os.environ.get(env_var) + if var: + # Under macOS in some cases - notably when using Pipenv - the + # sys.prefix of the virtualenv is /path/to/env/bin/.. instead of + # /path/to/env so we need to fully resolve the paths in order to + # compare them. 
+ if os.path.realpath(var) == os.path.realpath(sys.prefix): + return _try_get_same_env() + + try: + return create_environment(var, safe=False) + except InvalidPythonEnvironment: + pass + + +def _calculate_sha256_for_file(path): + sha256 = hashlib.sha256() + with open(path, 'rb') as f: + for block in iter(lambda: f.read(filecmp.BUFSIZE), b''): + sha256.update(block) + return sha256.hexdigest() + + +def get_default_environment(): + """ + Tries to return an active Virtualenv or conda environment. + If there is no VIRTUAL_ENV variable or no CONDA_PREFIX variable set + set it will return the latest Python version installed on the system. This + makes it possible to use as many new Python features as possible when using + autocompletion and other functionality. + + :returns: :class:`.Environment` + """ + virtual_env = _get_virtual_env_from_var() + if virtual_env is not None: + return virtual_env + + conda_env = _get_virtual_env_from_var(_CONDA_VAR) + if conda_env is not None: + return conda_env + + return _try_get_same_env() + + +def _try_get_same_env(): + env = SameEnvironment() + if not os.path.basename(env.executable).lower().startswith('python'): + # This tries to counter issues with embedding. In some cases (e.g. + # VIM's Python Mac/Windows, sys.executable is /foo/bar/vim. This + # happens, because for Mac a function called `_NSGetExecutablePath` is + # used and for Windows `GetModuleFileNameW`. These are both platform + # specific functions. For all other systems sys.executable should be + # alright. However here we try to generalize: + # + # 1. Check if the executable looks like python (heuristic) + # 2. In case it's not try to find the executable + # 3. In case we don't find it use an interpreter environment. + # + # The last option will always work, but leads to potential crashes of + # Jedi - which is ok, because it happens very rarely and even less, + # because the code below should work for most cases. 
+ if os.name == 'nt': + # The first case would be a virtualenv and the second a normal + # Python installation. + checks = (r'Scripts\python.exe', 'python.exe') + else: + # For unix it looks like Python is always in a bin folder. + checks = ( + 'bin/python%s.%s' % (sys.version_info[0], sys.version[1]), + 'bin/python%s' % (sys.version_info[0]), + 'bin/python', + ) + for check in checks: + guess = os.path.join(sys.exec_prefix, check) + if os.path.isfile(guess): + # Bingo - We think we have our Python. + return Environment(guess) + # It looks like there is no reasonable Python to be found. + return InterpreterEnvironment() + # If no virtualenv is found, use the environment we're already + # using. + return env + + +def get_cached_default_environment(): + var = os.environ.get('VIRTUAL_ENV') or os.environ.get(_CONDA_VAR) + environment = _get_cached_default_environment() + + # Under macOS in some cases - notably when using Pipenv - the + # sys.prefix of the virtualenv is /path/to/env/bin/.. instead of + # /path/to/env so we need to fully resolve the paths in order to + # compare them. + if var and os.path.realpath(var) != os.path.realpath(environment.path): + _get_cached_default_environment.clear_cache() + return _get_cached_default_environment() + return environment + + +@time_cache(seconds=10 * 60) # 10 Minutes +def _get_cached_default_environment(): + try: + return get_default_environment() + except InvalidPythonEnvironment: + # It's possible that `sys.executable` is wrong. Typically happens + # when Jedi is used in an executable that embeds Python. For further + # information, have a look at: + # https://github.com/davidhalter/jedi/issues/1531 + return InterpreterEnvironment() + + +def find_virtualenvs(paths=None, *, safe=True, use_environment_vars=True): + """ + :param paths: A list of paths in your file system to be scanned for + Virtualenvs. It will search in these paths and potentially execute the + Python binaries. + :param safe: Default True. 
In case this is False, it will allow this + function to execute potential `python` environments. An attacker might + be able to drop an executable in a path this function is searching by + default. If the executable has not been installed by root, it will not + be executed. + :param use_environment_vars: Default True. If True, the VIRTUAL_ENV + variable will be checked if it contains a valid VirtualEnv. + CONDA_PREFIX will be checked to see if it contains a valid conda + environment. + + :yields: :class:`.Environment` + """ + if paths is None: + paths = [] + + _used_paths = set() + + if use_environment_vars: + # Using this variable should be safe, because attackers might be + # able to drop files (via git) but not environment variables. + virtual_env = _get_virtual_env_from_var() + if virtual_env is not None: + yield virtual_env + _used_paths.add(virtual_env.path) + + conda_env = _get_virtual_env_from_var(_CONDA_VAR) + if conda_env is not None: + yield conda_env + _used_paths.add(conda_env.path) + + for directory in paths: + if not os.path.isdir(directory): + continue + + directory = os.path.abspath(directory) + for path in os.listdir(directory): + path = os.path.join(directory, path) + if path in _used_paths: + # A path shouldn't be inferred twice. + continue + _used_paths.add(path) + + try: + executable = _get_executable_path(path, safe=safe) + yield Environment(executable) + except InvalidPythonEnvironment: + pass + + +def find_system_environments(*, env_vars=None): + """ + Ignores virtualenvs and returns the Python versions that were installed on + your system. This might return nothing, if you're running Python e.g. from + a portable version. + + The environments are sorted from latest to oldest Python version. 
+ + :yields: :class:`.Environment` + """ + for version_string in _SUPPORTED_PYTHONS: + try: + yield get_system_environment(version_string, env_vars=env_vars) + except InvalidPythonEnvironment: + pass + + +# TODO: this function should probably return a list of environments since +# multiple Python installations can be found on a system for the same version. +def get_system_environment(version, *, env_vars=None): + """ + Return the first Python environment found for a string of the form 'X.Y' + where X and Y are the major and minor versions of Python. + + :raises: :exc:`.InvalidPythonEnvironment` + :returns: :class:`.Environment` + """ + exe = which('python' + version) + if exe: + if exe == sys.executable: + return SameEnvironment() + return Environment(exe) + + if os.name == 'nt': + for exe in _get_executables_from_windows_registry(version): + try: + return Environment(exe, env_vars=env_vars) + except InvalidPythonEnvironment: + pass + raise InvalidPythonEnvironment("Cannot find executable python%s." % version) + + +def create_environment(path, *, safe=True, env_vars=None): + """ + Make it possible to manually create an Environment object by specifying a + Virtualenv path or an executable path and optional environment variables. + + :raises: :exc:`.InvalidPythonEnvironment` + :returns: :class:`.Environment` + """ + if os.path.isfile(path): + _assert_safe(path, safe) + return Environment(path, env_vars=env_vars) + return Environment(_get_executable_path(path, safe=safe), env_vars=env_vars) + + +def _get_executable_path(path, safe=True): + """ + Returns None if it's not actually a virtual env. + """ + + if os.name == 'nt': + python = os.path.join(path, 'Scripts', 'python.exe') + else: + python = os.path.join(path, 'bin', 'python') + if not os.path.exists(python): + raise InvalidPythonEnvironment("%s seems to be missing." 
% python) + + _assert_safe(python, safe) + return python + + +def _get_executables_from_windows_registry(version): + # https://github.com/python/typeshed/pull/3794 adds winreg + import winreg # type: ignore[import] + + # TODO: support Python Anaconda. + sub_keys = [ + r'SOFTWARE\Python\PythonCore\{version}\InstallPath', + r'SOFTWARE\Wow6432Node\Python\PythonCore\{version}\InstallPath', + r'SOFTWARE\Python\PythonCore\{version}-32\InstallPath', + r'SOFTWARE\Wow6432Node\Python\PythonCore\{version}-32\InstallPath' + ] + for root_key in [winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE]: + for sub_key in sub_keys: + sub_key = sub_key.format(version=version) + try: + with winreg.OpenKey(root_key, sub_key) as key: + prefix = winreg.QueryValueEx(key, '')[0] + exe = os.path.join(prefix, 'python.exe') + if os.path.isfile(exe): + yield exe + except WindowsError: + pass + + +def _assert_safe(executable_path, safe): + if safe and not _is_safe(executable_path): + raise InvalidPythonEnvironment( + "The python binary is potentially unsafe.") + + +def _is_safe(executable_path): + # Resolve sym links. A venv typically is a symlink to a known Python + # binary. Only virtualenvs copy symlinks around. + real_path = os.path.realpath(executable_path) + + if _is_unix_safe_simple(real_path): + return True + + # Just check the list of known Python versions. If it's not in there, + # it's likely an attacker or some Python that was not properly + # installed in the system. + for environment in find_system_environments(): + if environment.executable == real_path: + return True + + # If the versions don't match, just compare the binary files. If we + # don't do that, only venvs will be working and not virtualenvs. + # venvs are symlinks while virtualenvs are actual copies of the + # Python files. + # This still means that if the system Python is updated and the + # virtualenv's Python is not (which is probably never going to get + # upgraded), it will not work with Jedi. 
IMO that's fine, because + # people should just be using venv. ~ dave + if environment._sha256 == _calculate_sha256_for_file(real_path): + return True + return False + + +def _is_unix_safe_simple(real_path): + if _is_unix_admin(): + # In case we are root, just be conservative and + # only execute known paths. + return any(real_path.startswith(p) for p in _SAFE_PATHS) + + uid = os.stat(real_path).st_uid + # The interpreter needs to be owned by root. This means that it wasn't + # written by a user and therefore attacking Jedi is not as simple. + # The attack could look like the following: + # 1. A user clones a repository. + # 2. The repository has an innocent looking folder called foobar. jedi + # searches for the folder and executes foobar/bin/python --version if + # there's also a foobar/bin/activate. + # 3. The attacker has gained code execution, since he controls + # foobar/bin/python. + return uid == 0 + + +def _is_unix_admin(): + try: + return os.getuid() == 0 + except AttributeError: + return False # Windows diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/api/errors.py b/bundle/jedi-vim/pythonx/jedi/jedi/api/errors.py new file mode 100644 index 000000000..10cb62af1 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/api/errors.py @@ -0,0 +1,46 @@ +""" +This file is about errors in Python files and not about exception handling in +Jedi. +""" + + +def parso_to_jedi_errors(grammar, module_node): + return [SyntaxError(e) for e in grammar.iter_errors(module_node)] + + +class SyntaxError: + """ + Syntax errors are generated by :meth:`.Script.get_syntax_errors`. 
+ """ + def __init__(self, parso_error): + self._parso_error = parso_error + + @property + def line(self): + """The line where the error starts (starting with 1).""" + return self._parso_error.start_pos[0] + + @property + def column(self): + """The column where the error starts (starting with 0).""" + return self._parso_error.start_pos[1] + + @property + def until_line(self): + """The line where the error ends (starting with 1).""" + return self._parso_error.end_pos[0] + + @property + def until_column(self): + """The column where the error ends (starting with 0).""" + return self._parso_error.end_pos[1] + + def get_message(self): + return self._parso_error.message + + def __repr__(self): + return '<%s from=%s to=%s>' % ( + self.__class__.__name__, + self._parso_error.start_pos, + self._parso_error.end_pos, + ) diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/api/exceptions.py b/bundle/jedi-vim/pythonx/jedi/jedi/api/exceptions.py new file mode 100644 index 000000000..db66a5f4c --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/api/exceptions.py @@ -0,0 +1,31 @@ +class _JediError(Exception): + pass + + +class InternalError(_JediError): + """ + This error might happen a subprocess is crashing. The reason for this is + usually broken C code in third party libraries. This is not a very common + thing and it is safe to use Jedi again. However using the same calls might + result in the same error again. + """ + + +class WrongVersion(_JediError): + """ + This error is reserved for the future, shouldn't really be happening at the + moment. + """ + + +class RefactoringError(_JediError): + """ + Refactorings can fail for various reasons. So if you work with refactorings + like :meth:`.Script.rename`, :meth:`.Script.inline`, + :meth:`.Script.extract_variable` and :meth:`.Script.extract_function`, make + sure to catch these. The descriptions in the errors are ususally valuable + for end users. 
+ + A typical ``RefactoringError`` would tell the user that inlining is not + possible if no name is under the cursor. + """ diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/api/file_name.py b/bundle/jedi-vim/pythonx/jedi/jedi/api/file_name.py new file mode 100644 index 000000000..277f3220c --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/api/file_name.py @@ -0,0 +1,155 @@ +import os + +from jedi.api import classes +from jedi.api.strings import StringName, get_quote_ending +from jedi.api.helpers import match +from jedi.inference.helpers import get_str_or_none + + +class PathName(StringName): + api_type = 'path' + + +def complete_file_name(inference_state, module_context, start_leaf, quote, string, + like_name, signatures_callback, code_lines, position, fuzzy): + # First we want to find out what can actually be changed as a name. + like_name_length = len(os.path.basename(string)) + + addition = _get_string_additions(module_context, start_leaf) + if string.startswith('~'): + string = os.path.expanduser(string) + if addition is None: + return + string = addition + string + + # Here we use basename again, because if strings are added like + # `'foo' + 'bar`, it should complete to `foobar/`. + must_start_with = os.path.basename(string) + string = os.path.dirname(string) + + sigs = signatures_callback(*position) + is_in_os_path_join = sigs and all(s.full_name == 'os.path.join' for s in sigs) + if is_in_os_path_join: + to_be_added = _add_os_path_join(module_context, start_leaf, sigs[0].bracket_start) + if to_be_added is None: + is_in_os_path_join = False + else: + string = to_be_added + string + base_path = os.path.join(inference_state.project.path, string) + try: + listed = sorted(os.scandir(base_path), key=lambda e: e.name) + # OSError: [Errno 36] File name too long: '...' 
+ except (FileNotFoundError, OSError): + return + quote_ending = get_quote_ending(quote, code_lines, position) + for entry in listed: + name = entry.name + if match(name, must_start_with, fuzzy=fuzzy): + if is_in_os_path_join or not entry.is_dir(): + name += quote_ending + else: + name += os.path.sep + + yield classes.Completion( + inference_state, + PathName(inference_state, name[len(must_start_with) - like_name_length:]), + stack=None, + like_name_length=like_name_length, + is_fuzzy=fuzzy, + ) + + +def _get_string_additions(module_context, start_leaf): + def iterate_nodes(): + node = addition.parent + was_addition = True + for child_node in reversed(node.children[:node.children.index(addition)]): + if was_addition: + was_addition = False + yield child_node + continue + + if child_node != '+': + break + was_addition = True + + addition = start_leaf.get_previous_leaf() + if addition != '+': + return '' + context = module_context.create_context(start_leaf) + return _add_strings(context, reversed(list(iterate_nodes()))) + + +def _add_strings(context, nodes, add_slash=False): + string = '' + first = True + for child_node in nodes: + values = context.infer_node(child_node) + if len(values) != 1: + return None + c, = values + s = get_str_or_none(c) + if s is None: + return None + if not first and add_slash: + string += os.path.sep + string += s + first = False + return string + + +def _add_os_path_join(module_context, start_leaf, bracket_start): + def check(maybe_bracket, nodes): + if maybe_bracket.start_pos != bracket_start: + return None + + if not nodes: + return '' + context = module_context.create_context(nodes[0]) + return _add_strings(context, nodes, add_slash=True) or '' + + if start_leaf.type == 'error_leaf': + # Unfinished string literal, like `join('` + value_node = start_leaf.parent + index = value_node.children.index(start_leaf) + if index > 0: + error_node = value_node.children[index - 1] + if error_node.type == 'error_node' and len(error_node.children) >= 
2: + index = -2 + if error_node.children[-1].type == 'arglist': + arglist_nodes = error_node.children[-1].children + index -= 1 + else: + arglist_nodes = [] + + return check(error_node.children[index + 1], arglist_nodes[::2]) + return None + + # Maybe an arglist or some weird error case. Therefore checked below. + searched_node_child = start_leaf + while searched_node_child.parent is not None \ + and searched_node_child.parent.type not in ('arglist', 'trailer', 'error_node'): + searched_node_child = searched_node_child.parent + + if searched_node_child.get_first_leaf() is not start_leaf: + return None + searched_node = searched_node_child.parent + if searched_node is None: + return None + + index = searched_node.children.index(searched_node_child) + arglist_nodes = searched_node.children[:index] + if searched_node.type == 'arglist': + trailer = searched_node.parent + if trailer.type == 'error_node': + trailer_index = trailer.children.index(searched_node) + assert trailer_index >= 2 + assert trailer.children[trailer_index - 1] == '(' + return check(trailer.children[trailer_index - 1], arglist_nodes[::2]) + elif trailer.type == 'trailer': + return check(trailer.children[0], arglist_nodes[::2]) + elif searched_node.type == 'trailer': + return check(searched_node.children[0], []) + elif searched_node.type == 'error_node': + # Stuff like `join(""` + return check(arglist_nodes[-1], []) diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/api/helpers.py b/bundle/jedi-vim/pythonx/jedi/jedi/api/helpers.py new file mode 100644 index 000000000..e8ce38819 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/api/helpers.py @@ -0,0 +1,522 @@ +""" +Helpers for the API +""" +import re +from collections import namedtuple +from textwrap import dedent +from itertools import chain +from functools import wraps +from inspect import Parameter + +from parso.python.parser import Parser +from parso.python import tree + +from jedi.inference.base_value import NO_VALUES +from 
jedi.inference.syntax_tree import infer_atom +from jedi.inference.helpers import infer_call_of_leaf +from jedi.inference.compiled import get_string_value_set +from jedi.cache import signature_time_cache, memoize_method +from jedi.parser_utils import get_parent_scope + + +CompletionParts = namedtuple('CompletionParts', ['path', 'has_dot', 'name']) + + +def _start_match(string, like_name): + return string.startswith(like_name) + + +def _fuzzy_match(string, like_name): + if len(like_name) <= 1: + return like_name in string + pos = string.find(like_name[0]) + if pos >= 0: + return _fuzzy_match(string[pos + 1:], like_name[1:]) + return False + + +def match(string, like_name, fuzzy=False): + if fuzzy: + return _fuzzy_match(string, like_name) + else: + return _start_match(string, like_name) + + +def sorted_definitions(defs): + # Note: `or ''` below is required because `module_path` could be + return sorted(defs, key=lambda x: (str(x.module_path or ''), + x.line or 0, + x.column or 0, + x.name)) + + +def get_on_completion_name(module_node, lines, position): + leaf = module_node.get_leaf_for_position(position) + if leaf is None or leaf.type in ('string', 'error_leaf'): + # Completions inside strings are a bit special, we need to parse the + # string. The same is true for comments and error_leafs. + line = lines[position[0] - 1] + # The first step of completions is to get the name + return re.search(r'(?!\d)\w+$|$', line[:position[1]]).group(0) + elif leaf.type not in ('name', 'keyword'): + return '' + + return leaf.value[:position[1] - leaf.start_pos[1]] + + +def _get_code(code_lines, start_pos, end_pos): + # Get relevant lines. + lines = code_lines[start_pos[0] - 1:end_pos[0]] + # Remove the parts at the end of the line. + lines[-1] = lines[-1][:end_pos[1]] + # Remove first line indentation. 
+ lines[0] = lines[0][start_pos[1]:] + return ''.join(lines) + + +class OnErrorLeaf(Exception): + @property + def error_leaf(self): + return self.args[0] + + +def _get_code_for_stack(code_lines, leaf, position): + # It might happen that we're on whitespace or on a comment. This means + # that we would not get the right leaf. + if leaf.start_pos >= position: + # If we're not on a comment simply get the previous leaf and proceed. + leaf = leaf.get_previous_leaf() + if leaf is None: + return '' # At the beginning of the file. + + is_after_newline = leaf.type == 'newline' + while leaf.type == 'newline': + leaf = leaf.get_previous_leaf() + if leaf is None: + return '' + + if leaf.type == 'error_leaf' or leaf.type == 'string': + if leaf.start_pos[0] < position[0]: + # On a different line, we just begin anew. + return '' + + # Error leafs cannot be parsed, completion in strings is also + # impossible. + raise OnErrorLeaf(leaf) + else: + user_stmt = leaf + while True: + if user_stmt.parent.type in ('file_input', 'suite', 'simple_stmt'): + break + user_stmt = user_stmt.parent + + if is_after_newline: + if user_stmt.start_pos[1] > position[1]: + # This means that it's actually a dedent and that means that we + # start without value (part of a suite). + return '' + + # This is basically getting the relevant lines. + return _get_code(code_lines, user_stmt.get_start_pos_of_prefix(), position) + + +def get_stack_at_position(grammar, code_lines, leaf, pos): + """ + Returns the possible node names (e.g. import_from, xor_test or yield_stmt). + """ + class EndMarkerReached(Exception): + pass + + def tokenize_without_endmarker(code): + # TODO This is for now not an official parso API that exists purely + # for Jedi. + tokens = grammar._tokenize(code) + for token in tokens: + if token.string == safeword: + raise EndMarkerReached() + elif token.prefix.endswith(safeword): + # This happens with comments. 
+ raise EndMarkerReached() + elif token.string.endswith(safeword): + yield token # Probably an f-string literal that was not finished. + raise EndMarkerReached() + else: + yield token + + # The code might be indedented, just remove it. + code = dedent(_get_code_for_stack(code_lines, leaf, pos)) + # We use a word to tell Jedi when we have reached the start of the + # completion. + # Use Z as a prefix because it's not part of a number suffix. + safeword = 'ZZZ_USER_WANTS_TO_COMPLETE_HERE_WITH_JEDI' + code = code + ' ' + safeword + + p = Parser(grammar._pgen_grammar, error_recovery=True) + try: + p.parse(tokens=tokenize_without_endmarker(code)) + except EndMarkerReached: + return p.stack + raise SystemError( + "This really shouldn't happen. There's a bug in Jedi:\n%s" + % list(tokenize_without_endmarker(code)) + ) + + +def infer(inference_state, context, leaf): + if leaf.type == 'name': + return inference_state.infer(context, leaf) + + parent = leaf.parent + definitions = NO_VALUES + if parent.type == 'atom': + # e.g. `(a + b)` + definitions = context.infer_node(leaf.parent) + elif parent.type == 'trailer': + # e.g. `a()` + definitions = infer_call_of_leaf(context, leaf) + elif isinstance(leaf, tree.Literal): + # e.g. 
`"foo"` or `1.0`
        return infer_atom(context, leaf)
    elif leaf.type in ('fstring_string', 'fstring_start', 'fstring_end'):
        return get_string_value_set(inference_state)
    return definitions


def filter_follow_imports(names, follow_builtin_imports=False):
    """
    Resolve import names to the names they point at (recursively).

    If ``follow_builtin_imports`` is set and any resolved target is a builtin
    (recognized by ``start_pos is None``), the original import name is yielded
    instead of the resolved targets.
    """
    for name in names:
        if name.is_import():
            new_names = list(filter_follow_imports(
                name.goto(),
                follow_builtin_imports=follow_builtin_imports,
            ))
            found_builtin = False
            if follow_builtin_imports:
                for new_name in new_names:
                    if new_name.start_pos is None:
                        found_builtin = True

            if found_builtin:
                yield name
            else:
                yield from new_names
        else:
            yield name


class CallDetails:
    """
    Describes the call the cursor is inside of: the opening bracket leaf, the
    argument child nodes and the cursor position.
    """
    def __init__(self, bracket_leaf, children, position):
        self.bracket_leaf = bracket_leaf
        self._children = children
        self._position = position

    @property
    def index(self):
        # Amount of commas before the cursor (= positional argument index).
        return _get_index_and_key(self._children, self._position)[0]

    @property
    def keyword_name_str(self):
        # The keyword argument name left of the cursor, if any.
        return _get_index_and_key(self._children, self._position)[1]

    @memoize_method
    def _list_arguments(self):
        return list(_iter_arguments(self._children, self._position))

    def calculate_index(self, param_names):
        """
        Returns the index into ``param_names`` that the cursor's argument
        corresponds to, or None if no parameter matches.
        """
        positional_count = 0
        used_names = set()
        star_count = -1
        args = self._list_arguments()
        if not args:
            if param_names:
                return 0
            else:
                return None

        is_kwarg = False
        for i, (star_count, key_start, had_equal) in enumerate(args):
            is_kwarg |= had_equal | (star_count == 2)
            if star_count:
                pass  # For now do nothing, we don't know what's in there here.
            else:
                if i + 1 != len(args):  # Not last
                    if had_equal:
                        used_names.add(key_start)
                    else:
                        positional_count += 1

        # After the loop, star_count/key_start/had_equal hold the values of
        # the argument the cursor is on (the last one before the position).
        for i, param_name in enumerate(param_names):
            kind = param_name.get_kind()

            if not is_kwarg:
                if kind == Parameter.VAR_POSITIONAL:
                    return i
                if kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.POSITIONAL_ONLY):
                    if i == positional_count:
                        return i

            if key_start is not None and not star_count == 1 or star_count == 2:
                if param_name.string_name not in used_names \
                        and (kind == Parameter.KEYWORD_ONLY
                             or kind == Parameter.POSITIONAL_OR_KEYWORD
                             and positional_count <= i):
                    if star_count:
                        return i
                    if had_equal:
                        if param_name.string_name == key_start:
                            return i
                    else:
                        if param_name.string_name.startswith(key_start):
                            return i

            if kind == Parameter.VAR_KEYWORD:
                return i
        return None

    def iter_used_keyword_arguments(self):
        for star_count, key_start, had_equal in list(self._list_arguments()):
            if had_equal and key_start:
                yield key_start

    def count_positional_arguments(self):
        count = 0
        for star_count, key_start, had_equal in self._list_arguments()[:-1]:
            if star_count:
                break
            count += 1
        return count


def _iter_arguments(nodes, position):
    def remove_after_pos(name):
        if name.type != 'name':
            return None
        return name.value[:position[1] - name.start_pos[1]]

    # Returns Generator[Tuple[star_count, Optional[key_start: str], had_equal]]
    nodes_before = [c for c in nodes if c.start_pos < position]
    if nodes_before[-1].type == 'arglist':
        yield from _iter_arguments(nodes_before[-1].children, position)
        return

    previous_node_yielded = False
    stars_seen = 0
    for i, node in enumerate(nodes_before):
        if node.type == 'argument':
            previous_node_yielded = True
            first = node.children[0]
            second = node.children[1]
            if second == '=':
                if second.start_pos < position:
                    yield 0, first.value, True
                else:
                    yield 0, remove_after_pos(first), False
            elif first in ('*', '**'):
                yield len(first.value), remove_after_pos(second), False
            else:
                # Must be a Comprehension
                first_leaf = node.get_first_leaf()
                if first_leaf.type == 'name' and first_leaf.start_pos >= position:
                    yield 0, remove_after_pos(first_leaf), False
                else:
                    yield 0, None, False
            stars_seen = 0
        elif node.type == 'testlist_star_expr':
            for n in node.children[::2]:
                if n.type == 'star_expr':
                    stars_seen = 1
                    n = n.children[1]
                yield stars_seen, remove_after_pos(n), False
                stars_seen = 0
            # The count of children is even if there's a comma at the end.
            previous_node_yielded = bool(len(node.children) % 2)
        elif isinstance(node, tree.PythonLeaf) and node.value == ',':
            if not previous_node_yielded:
                yield stars_seen, '', False
                stars_seen = 0
            previous_node_yielded = False
        elif isinstance(node, tree.PythonLeaf) and node.value in ('*', '**'):
            stars_seen = len(node.value)
        elif node == '=' and nodes_before[-1]:
            previous_node_yielded = True
            before = nodes_before[i - 1]
            if before.type == 'name':
                yield 0, before.value, True
            else:
                yield 0, None, False
            # Just ignore the star that is probably a syntax error.
            stars_seen = 0

    if not previous_node_yielded:
        if nodes_before[-1].type == 'name':
            yield stars_seen, remove_after_pos(nodes_before[-1]), False
        else:
            yield stars_seen, '', False


def _get_index_and_key(nodes, position):
    """
    Returns the amount of commas and the keyword argument string.
    """
    nodes_before = [c for c in nodes if c.start_pos < position]
    if nodes_before[-1].type == 'arglist':
        return _get_index_and_key(nodes_before[-1].children, position)

    key_str = None

    last = nodes_before[-1]
    if last.type == 'argument' and last.children[1] == '=' \
            and last.children[1].end_pos <= position:
        # The cursor sits after `name=`, so this is a keyword argument.
        key_str = last.children[0].value
    elif last == '=':
        key_str = nodes_before[-2].value

    return nodes_before.count(','), key_str


def _get_signature_details_from_error_node(node, additional_children, position):
    # Scan an error_node right-to-left for an opening parenthesis that the
    # cursor is inside of, and build CallDetails from there.
    for index, element in reversed(list(enumerate(node.children))):
        # `index > 0` means that it's a trailer and not an atom.
        if element == '(' and element.end_pos <= position and index > 0:
            # It's an error node, we don't want to match too much, just
            # until the parentheses is enough.
            children = node.children[index:]
            name = element.get_previous_leaf()
            if name is None:
                continue
            if name.type == 'name' or name.parent.type in ('trailer', 'atom'):
                return CallDetails(element, children + additional_children, position)


def get_signature_details(module, position):
    """
    Walks the syntax tree upward from ``position`` and returns CallDetails
    for the innermost call the cursor is inside of, or None.
    """
    leaf = module.get_leaf_for_position(position, include_prefixes=True)
    # It's easier to deal with the previous token than the next one in this
    # case.
    if leaf.start_pos >= position:
        # Whitespace / comments after the leaf count towards the previous leaf.
        leaf = leaf.get_previous_leaf()
        if leaf is None:
            return None

    # Now that we know where we are in the syntax tree, we start to look at
    # parents for possible function definitions.
    node = leaf.parent
    while node is not None:
        if node.type in ('funcdef', 'classdef', 'decorated', 'async_stmt'):
            # Don't show signatures if there's stuff before it that just
            # makes it feel strange to have a signature.
            return None

        additional_children = []
        for n in reversed(node.children):
            if n.start_pos < position:
                if n.type == 'error_node':
                    result = _get_signature_details_from_error_node(
                        n, additional_children, position
                    )
                    if result is not None:
                        return result

                    additional_children[0:0] = n.children
                    continue
                additional_children.insert(0, n)

        # Find a valid trailer
        if node.type == 'trailer' and node.children[0] == '(' \
                or node.type == 'decorator' and node.children[2] == '(':
            # Additionally we have to check that an ending parenthesis isn't
            # interpreted wrong. There are two cases:
            # 1. Cursor before paren -> The current signature is good
            # 2. Cursor after paren -> We need to skip the current signature
            if not (leaf is node.children[-1] and position >= leaf.end_pos):
                leaf = node.get_previous_leaf()
                if leaf is None:
                    return None
                return CallDetails(
                    node.children[0] if node.type == 'trailer' else node.children[2],
                    node.children,
                    position
                )

        node = node.parent

    return None


@signature_time_cache("call_signatures_validity")
def cache_signatures(inference_state, context, bracket_leaf, code_lines, user_pos):
    """This function calculates the cache key."""
    line_index = user_pos[0] - 1

    before_cursor = code_lines[line_index][:user_pos[1]]
    other_lines = code_lines[bracket_leaf.start_pos[0]:line_index]
    whole = ''.join(other_lines + [before_cursor])
    before_bracket = re.match(r'.*\(', whole, re.DOTALL)

    module_path = context.get_root_context().py__file__()
    if module_path is None:
        yield None  # Don't cache!
    else:
        yield (module_path, before_bracket, bracket_leaf.start_pos)
    yield infer(
        inference_state,
        context,
        bracket_leaf.get_previous_leaf(),
    )


def validate_line_column(func):
    """
    Decorator for Script API methods: normalizes/validates the (line, column)
    arguments against ``self._code_lines`` before calling ``func``.
    """
    @wraps(func)
    def wrapper(self, line=None, column=None, *args, **kwargs):
        line = max(len(self._code_lines), 1) if line is None else line
        if not (0 < line <= len(self._code_lines)):
            raise ValueError('`line` parameter is not in a valid range.')

        line_string = self._code_lines[line - 1]
        line_len = len(line_string)
        if line_string.endswith('\r\n'):
            line_len -= 2
        elif line_string.endswith('\n'):
            line_len -= 1

        column = line_len if column is None else column
        if not (0 <= column <= line_len):
            raise ValueError('`column` parameter (%d) is not in a valid range '
                             '(0-%d) for line %d (%r).' % (
                                 column, line_len, line, line_string))
        return func(self, line, column, *args, **kwargs)
    return wrapper


def get_module_names(module, all_scopes, definitions=True, references=False):
    """
    Returns a dictionary with name parts as keys and their call paths as
    values.
    """
    def def_ref_filter(name):
        is_def = name.is_definition()
        return definitions and is_def or references and not is_def

    names = list(chain.from_iterable(module.get_used_names().values()))
    if not all_scopes:
        # We have to filter all the names that don't have the module as a
        # parent_scope. There's None as a parent, because nodes in the module
        # node have the parent module and not suite as all the others.
        # Therefore it's important to catch that case.

        def is_module_scope_name(name):
            parent_scope = get_parent_scope(name)
            # async functions have an extra wrapper. Strip it.
            if parent_scope and parent_scope.type == 'async_stmt':
                parent_scope = parent_scope.parent
            return parent_scope in (module, None)

        names = [n for n in names if is_module_scope_name(n)]
    return filter(def_ref_filter, names)


def split_search_string(name):
    """
    Splits a project search string like ``"def foo.bar"`` into
    ``(type, dotted_name_parts)``; ``'def'`` is normalized to ``'function'``.
    """
    type, _, dotted_names = name.rpartition(' ')
    if type == 'def':
        type = 'function'
    return type, dotted_names.split('.')
# diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/api/interpreter.py b/bundle/jedi-vim/pythonx/jedi/jedi/api/interpreter.py
# new file mode 100644  index 000000000..befafe5a4  --- /dev/null  +++ b/bundle/jedi-vim/pythonx/jedi/jedi/api/interpreter.py  @@ -0,0 +1,74 @@
"""
TODO Some parts of this module are still not well documented.
"""

from jedi.inference import compiled
from jedi.inference.base_value import ValueSet
from jedi.inference.filters import ParserTreeFilter, MergedFilter
from jedi.inference.names import TreeNameDefinition
from jedi.inference.compiled import mixed
from jedi.inference.compiled.access import create_access_path
from jedi.inference.context import ModuleContext


def _create(inference_state, obj):
    # Wraps a runtime object as a compiled value via an access path.
    return compiled.create_from_access_path(
        inference_state, create_access_path(inference_state, obj)
    )


class NamespaceObject:
    # Minimal object whose attribute dict is exactly the given namespace dict.
    def __init__(self, dct):
        self.__dict__ = dct


class MixedTreeName(TreeNameDefinition):
    def infer(self):
        """
        In an IPython notebook it is typical that some parts of the provided
        code were already executed. In that case, if something is not properly
        inferred, it should still infer from the variables it already knows.
        """
        inferred = super().infer()
        if not inferred:
            for compiled_value in self.parent_context.mixed_values:
                for f in compiled_value.get_filters():
                    values = ValueSet.from_sets(
                        n.infer() for n in f.get(self.string_name)
                    )
                    if values:
                        return values
        return inferred


class MixedParserTreeFilter(ParserTreeFilter):
    name_class = MixedTreeName


class MixedModuleContext(ModuleContext):
    def __init__(self, tree_module_value, namespaces):
        super().__init__(tree_module_value)
        self.mixed_values = [
            self._get_mixed_object(
                _create(self.inference_state, NamespaceObject(n))
            ) for n in namespaces
        ]

    def _get_mixed_object(self, compiled_value):
        return mixed.MixedObject(
            compiled_value=compiled_value,
            tree_value=self._value
        )

    def get_filters(self, until_position=None, origin_scope=None):
        yield MergedFilter(
            MixedParserTreeFilter(
                parent_context=self,
                until_position=until_position,
                origin_scope=origin_scope
            ),
            self.get_global_filter(),
        )

        for mixed_object in self.mixed_values:
            yield from mixed_object.get_filters(until_position, origin_scope)
# diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/api/keywords.py b/bundle/jedi-vim/pythonx/jedi/jedi/api/keywords.py
# new file mode 100644  index 000000000..95c49227f  --- /dev/null  +++ b/bundle/jedi-vim/pythonx/jedi/jedi/api/keywords.py  @@ -0,0 +1,52 @@
import pydoc
from contextlib import suppress
from typing import Dict, Optional

from jedi.inference.names import AbstractArbitraryName

try:
    # https://github.com/python/typeshed/pull/4351 adds pydoc_data
    from pydoc_data import topics  # type: ignore[import]
    pydoc_topics: Optional[Dict[str, str]] = topics.topics
except ImportError:
    # Python 3.6.8 embeddable does not have pydoc_data.
    pydoc_topics = None


class KeywordName(AbstractArbitraryName):
    api_type = 'keyword'

    def py__doc__(self):
        return imitate_pydoc(self.string_name)


def imitate_pydoc(string):
    """
    It's not possible to get the pydoc documentation without starting the
    annoying pager stuff.
    """
    if pydoc_topics is None:
        return ''

    h = pydoc.help
    with suppress(KeyError):
        # try to access symbols
        string = h.symbols[string]
        string, _, related = string.partition(' ')

    def get_target(s):
        return h.topics.get(s, h.keywords.get(s))

    while isinstance(string, str):
        string = get_target(string)

    try:
        # is a tuple now
        label, related = string
    except TypeError:
        return ''

    try:
        return pydoc_topics[label].strip() if pydoc_topics else ''
    except KeyError:
        return ''
# diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/api/project.py b/bundle/jedi-vim/pythonx/jedi/jedi/api/project.py
# new file mode 100644  index 000000000..da0f31d7b  --- /dev/null  +++ b/bundle/jedi-vim/pythonx/jedi/jedi/api/project.py  @@ -0,0 +1,450 @@
"""
Projects are a way to handle Python projects within Jedi. For simpler plugins
you might not want to deal with projects, but if you want to give the user more
flexibility to define sys paths and Python interpreters for a project,
:class:`.Project` is the perfect way to allow for that.

Projects can be saved to disk and loaded again, to allow project definitions to
be used across repositories.
"""
import json
from pathlib import Path
from itertools import chain

from jedi import debug
from jedi.api.environment import get_cached_default_environment, create_environment
from jedi.api.exceptions import WrongVersion
from jedi.api.completion import search_in_module
from jedi.api.helpers import split_search_string, get_module_names
from jedi.inference.imports import load_module_from_path, \
    load_namespace_from_path, iter_module_names
from jedi.inference.sys_path import discover_buildout_paths
from jedi.inference.cache import inference_state_as_method_param_cache
from jedi.inference.references import recurse_find_python_folders_and_files, search_in_file_ios
from jedi.file_io import FolderIO

_CONFIG_FOLDER = '.jedi'
# Presence of any of these marks a folder as a probable project root.
_CONTAINS_POTENTIAL_PROJECT = \
    'setup.py', '.git', '.hg', 'requirements.txt', 'MANIFEST.in', 'pyproject.toml'

_SERIALIZER_VERSION = 1


def _try_to_skip_duplicates(func):
    # Generator decorator: drops results whose tree node or module path was
    # already yielded.
    def wrapper(*args, **kwargs):
        found_tree_nodes = []
        found_modules = []
        for definition in func(*args, **kwargs):
            tree_node = definition._name.tree_name
            if tree_node is not None and tree_node in found_tree_nodes:
                continue
            if definition.type == 'module' and definition.module_path is not None:
                if definition.module_path in found_modules:
                    continue
                found_modules.append(definition.module_path)
            yield definition
            found_tree_nodes.append(tree_node)
    return wrapper


def _remove_duplicates_from_path(path):
    # Order-preserving dedup of sys-path entries.
    used = set()
    for p in path:
        if p in used:
            continue
        used.add(p)
        yield p


class Project:
    """
    Projects are a simple way to manage Python folders and define how Jedi does
    import resolution. It is mostly used as a parameter to :class:`.Script`.
    Additionally there are functions to search a whole project.
    """
    _environment = None

    @staticmethod
    def _get_config_folder_path(base_path):
        return base_path.joinpath(_CONFIG_FOLDER)

    @staticmethod
    def _get_json_path(base_path):
        return Project._get_config_folder_path(base_path).joinpath('project.json')

    @classmethod
    def load(cls, path):
        """
        Loads a project from a specific path. You should not provide the path
        to ``.jedi/project.json``, but rather the path to the project folder.

        :param path: The path of the directory you want to use as a project.
        """
        if isinstance(path, str):
            path = Path(path)
        with open(cls._get_json_path(path)) as f:
            version, data = json.load(f)

        if version == 1:
            return cls(**data)
        else:
            raise WrongVersion(
                "The Jedi version of this project seems newer than what we can handle."
            )

    def save(self):
        """
        Saves the project configuration in the project in ``.jedi/project.json``.
        """
        data = dict(self.__dict__)
        data.pop('_environment', None)
        data.pop('_django', None)  # TODO make django setting public?
        data = {k.lstrip('_'): v for k, v in data.items()}
        data['path'] = str(data['path'])

        self._get_config_folder_path(self._path).mkdir(parents=True, exist_ok=True)
        with open(self._get_json_path(self._path), 'w') as f:
            return json.dump((_SERIALIZER_VERSION, data), f)

    def __init__(
        self,
        path,
        *,
        environment_path=None,
        load_unsafe_extensions=False,
        sys_path=None,
        added_sys_path=(),
        smart_sys_path=True,
    ) -> None:
        """
        :param path: The base path for this project.
        :param environment_path: The Python executable path, typically the path
            of a virtual environment.
        :param load_unsafe_extensions: Default False, Loads extensions that are not in the
            sys path and in the local directories. With this option enabled,
            this is potentially unsafe if you clone a git repository and
            analyze its code, because those compiled extensions will be
            imported and therefore have execution privileges.
        :param sys_path: list of str. You can override the sys path if you
            want. By default the ``sys.path.`` is generated by the
            environment (virtualenvs, etc).
        :param added_sys_path: list of str. Adds these paths at the end of the
            sys path.
        :param smart_sys_path: If this is enabled (default), adds paths from
            local directories. Otherwise you will have to rely on your packages
            being properly configured on the ``sys.path``.
        """

        if isinstance(path, str):
            path = Path(path).absolute()
        self._path = path

        self._environment_path = environment_path
        if sys_path is not None:
            # Remap potential pathlib.Path entries
            sys_path = list(map(str, sys_path))
        self._sys_path = sys_path
        self._smart_sys_path = smart_sys_path
        self._load_unsafe_extensions = load_unsafe_extensions
        self._django = False
        # Remap potential pathlib.Path entries
        self.added_sys_path = list(map(str, added_sys_path))
        """The sys path that is going to be added at the end of the sys path."""

    @property
    def path(self):
        """
        The base path for this project.
        """
        return self._path

    @property
    def sys_path(self):
        """
        The sys path provided to this project. This can be None and in that
        case will be auto generated.
        """
        return self._sys_path

    @property
    def smart_sys_path(self):
        """
        If the sys path is going to be calculated in a smart way, where
        additional paths are added.
        """
        return self._smart_sys_path

    @property
    def load_unsafe_extensions(self):
        """
        Whether the project loads unsafe extensions.
        """
        return self._load_unsafe_extensions

    @inference_state_as_method_param_cache()
    def _get_base_sys_path(self, inference_state):
        # The sys path has not been set explicitly.
        sys_path = list(inference_state.environment.get_sys_path())
        try:
            sys_path.remove('')
        except ValueError:
            pass
        return sys_path

    @inference_state_as_method_param_cache()
    def _get_sys_path(self, inference_state, add_parent_paths=True, add_init_paths=False):
        """
        Keep this method private for all users of jedi. However internally this
        one is used like a public method.
        """
        suffixed = list(self.added_sys_path)
        prefixed = []

        if self._sys_path is None:
            sys_path = list(self._get_base_sys_path(inference_state))
        else:
            sys_path = list(self._sys_path)

        if self._smart_sys_path:
            prefixed.append(str(self._path))

            if inference_state.script_path is not None:
                suffixed += map(str, discover_buildout_paths(
                    inference_state,
                    inference_state.script_path
                ))

                if add_parent_paths:
                    # Collect directories in upward search by:
                    #   1. Skipping directories with __init__.py
                    #   2. Stopping immediately when above self._path
                    traversed = []
                    for parent_path in inference_state.script_path.parents:
                        if parent_path == self._path \
                                or self._path not in parent_path.parents:
                            break
                        if not add_init_paths \
                                and parent_path.joinpath("__init__.py").is_file():
                            continue
                        traversed.append(str(parent_path))

                    # AFAIK some libraries have imports like `foo.foo.bar`, which
                    # leads to the conclusion to prefer longer paths rather than
                    # shorter ones by default.
                    suffixed += reversed(traversed)

        if self._django:
            prefixed.append(str(self._path))

        path = prefixed + sys_path + suffixed
        return list(_remove_duplicates_from_path(path))

    def get_environment(self):
        if self._environment is None:
            if self._environment_path is not None:
                self._environment = create_environment(self._environment_path, safe=False)
            else:
                self._environment = get_cached_default_environment()
        return self._environment

    def search(self, string, *, all_scopes=False):
        """
        Searches a name in the whole project. If the project is very big,
        at some point Jedi will stop searching. However it's also very much
        recommended to not exhaust the generator. Just display the first ten
        results to the user.

        There are currently three different search patterns:

        - ``foo`` to search for a definition foo in any file or a file called
          ``foo.py`` or ``foo.pyi``.
        - ``foo.bar`` to search for the ``foo`` and then an attribute ``bar``
          in it.
        - ``class foo.bar.Bar`` or ``def foo.bar.baz`` to search for a specific
          API type.

        :param bool all_scopes: Default False; searches not only for
            definitions on the top level of a module level, but also in
            functions and classes.
        :yields: :class:`.Name`
        """
        return self._search_func(string, all_scopes=all_scopes)

    def complete_search(self, string, **kwargs):
        """
        Like :meth:`.Script.search`, but completes that string. An empty string
        lists all definitions in a project, so be careful with that.

        :param bool all_scopes: Default False; searches not only for
            definitions on the top level of a module level, but also in
            functions and classes.
        :yields: :class:`.Completion`
        """
        return self._search_func(string, complete=True, **kwargs)

    @_try_to_skip_duplicates
    def _search_func(self, string, complete=False, all_scopes=False):
        # Using a Script is the easiest way to get an empty module context.
        from jedi import Script
        s = Script('', project=self)
        inference_state = s._inference_state
        empty_module_context = s._get_module_context()

        debug.dbg('Search for string %s, complete=%s', string, complete)
        wanted_type, wanted_names = split_search_string(string)
        name = wanted_names[0]
        stub_folder_name = name + '-stubs'

        ios = recurse_find_python_folders_and_files(FolderIO(str(self._path)))
        file_ios = []

        # 1. Search for modules in the current project
        for folder_io, file_io in ios:
            if file_io is None:
                file_name = folder_io.get_base_name()
                if file_name == name or file_name == stub_folder_name:
                    f = folder_io.get_file_io('__init__.py')
                    try:
                        m = load_module_from_path(inference_state, f).as_context()
                    except FileNotFoundError:
                        f = folder_io.get_file_io('__init__.pyi')
                        try:
                            m = load_module_from_path(inference_state, f).as_context()
                        except FileNotFoundError:
                            m = load_namespace_from_path(inference_state, folder_io).as_context()
                else:
                    continue
            else:
                file_ios.append(file_io)
                if Path(file_io.path).name in (name + '.py', name + '.pyi'):
                    m = load_module_from_path(inference_state, file_io).as_context()
                else:
                    continue

            debug.dbg('Search of a specific module %s', m)
            yield from search_in_module(
                inference_state,
                m,
                names=[m.name],
                wanted_type=wanted_type,
                wanted_names=wanted_names,
                complete=complete,
                convert=True,
                ignore_imports=True,
            )

        # 2. Search for identifiers in the project.
        for module_context in search_in_file_ios(inference_state, file_ios,
                                                 name, complete=complete):
            names = get_module_names(module_context.tree_node, all_scopes=all_scopes)
            names = [module_context.create_name(n) for n in names]
            names = _remove_imports(names)
            yield from search_in_module(
                inference_state,
                module_context,
                names=names,
                wanted_type=wanted_type,
                wanted_names=wanted_names,
                complete=complete,
                ignore_imports=True,
            )

        # 3. Search for modules on sys.path
        sys_path = [
            p for p in self._get_sys_path(inference_state)
            # Exclude folders that are handled by recursing of the Python
            # folders.
            if not p.startswith(str(self._path))
        ]
        names = list(iter_module_names(inference_state, empty_module_context, sys_path))
        yield from search_in_module(
            inference_state,
            empty_module_context,
            names=names,
            wanted_type=wanted_type,
            wanted_names=wanted_names,
            complete=complete,
            convert=True,
        )

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self._path)


def _is_potential_project(path):
    for name in _CONTAINS_POTENTIAL_PROJECT:
        try:
            if path.joinpath(name).exists():
                return True
        except OSError:
            continue
    return False


def _is_django_path(directory):
    """ Detects the path of the very well known Django library (if used) """
    try:
        with open(directory.joinpath('manage.py'), 'rb') as f:
            return b"DJANGO_SETTINGS_MODULE" in f.read()
    except (FileNotFoundError, IsADirectoryError, PermissionError):
        return False


def get_default_project(path=None):
    """
    If a project is not defined by the user, Jedi tries to define a project by
    itself as well as possible. Jedi traverses folders until it finds one of
    the following:

    1. A ``.jedi/project.json``
    2. One of the following files: ``setup.py``, ``.git``, ``.hg``,
       ``requirements.txt``, ``MANIFEST.in`` and ``pyproject.toml``.
    """
    if path is None:
        path = Path.cwd()
    elif isinstance(path, str):
        path = Path(path)

    check = path.absolute()
    probable_path = None
    first_no_init_file = None
    for dir in chain([check], check.parents):
        try:
            return Project.load(dir)
        except (FileNotFoundError, IsADirectoryError, PermissionError):
            pass
        except NotADirectoryError:
            continue

        if first_no_init_file is None:
            if dir.joinpath('__init__.py').exists():
                # In the case that a __init__.py exists, it's in 99% just a
                # Python package and the project sits at least one level above.
                continue
            elif not dir.is_file():
                first_no_init_file = dir

        if _is_django_path(dir):
            project = Project(dir)
            project._django = True
            return project

        if probable_path is None and _is_potential_project(dir):
            probable_path = dir

    if probable_path is not None:
        # TODO search for setup.py etc
        return Project(probable_path)

    if first_no_init_file is not None:
        return Project(first_no_init_file)

    curdir = path if path.is_dir() else path.parent
    return Project(curdir)


def _remove_imports(names):
    return [
        n for n in names
        if n.tree_name is None or n.api_type not in ('module', 'namespace')
    ]
# diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/api/refactoring/__init__.py b/bundle/jedi-vim/pythonx/jedi/jedi/api/refactoring/__init__.py
# new file mode 100644  index 000000000..84513da02  --- /dev/null  +++ b/bundle/jedi-vim/pythonx/jedi/jedi/api/refactoring/__init__.py  @@ -0,0 +1,242 @@
import difflib
from pathlib import Path
from typing import Dict, Iterable, Tuple

from parso import split_lines

from jedi.api.exceptions import RefactoringError

EXPRESSION_PARTS = (
    'or_test and_test not_test comparison '
    'expr xor_expr and_expr shift_expr arith_expr term factor power atom_expr'
).split()


class ChangedFile:
    """
    A single file touched by a refactoring: knows the old module node and a
    node -> replacement-string map to produce the new code.
    """
    def __init__(self, inference_state, from_path, to_path,
                 module_node, node_to_str_map):
        self._inference_state = inference_state
        self._from_path = from_path
        self._to_path = to_path
        self._module_node = module_node
        self._node_to_str_map = node_to_str_map

    def get_diff(self):
        old_lines = split_lines(self._module_node.get_code(), keepends=True)
        new_lines = split_lines(self.get_new_code(), keepends=True)

        # Add a newline at the end if it's missing. Otherwise the diff will be
        # very weird. A `diff -u file1 file2` would show the string:
        #
        #     \ No newline at end of file
        #
        # This is not necessary IMO, because Jedi does not really play with
        # newlines and the ending newline does not really matter in Python
        # files. ~dave
        if old_lines[-1] != '':
            old_lines[-1] += '\n'
        if new_lines[-1] != '':
            new_lines[-1] += '\n'

        project_path = self._inference_state.project.path
        if self._from_path is None:
            from_p = ''
        else:
            from_p = self._from_path.relative_to(project_path)
        if self._to_path is None:
            to_p = ''
        else:
            to_p = self._to_path.relative_to(project_path)
        diff = difflib.unified_diff(
            old_lines, new_lines,
            fromfile=str(from_p),
            tofile=str(to_p),
        )
        # Apparently there's a space at the end of the diff - for whatever
        # reason.
        return ''.join(diff).rstrip(' ')

    def get_new_code(self):
        return self._inference_state.grammar.refactor(self._module_node, self._node_to_str_map)

    def apply(self):
        if self._from_path is None:
            raise RefactoringError(
                'Cannot apply a refactoring on a Script with path=None'
            )

        with open(self._from_path, 'w', newline='') as f:
            f.write(self.get_new_code())

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self._from_path)


class Refactoring:
    """
    The result of a refactoring operation: per-file node changes plus file
    renames, with diff/apply helpers.
    """
    def __init__(self, inference_state, file_to_node_changes, renames=()):
        self._inference_state = inference_state
        self._renames = renames
        self._file_to_node_changes = file_to_node_changes

    def get_changed_files(self) -> Dict[Path, ChangedFile]:
        def calculate_to_path(p):
            if p is None:
                return p
            p = str(p)
            for from_, to in renames:
                if p.startswith(str(from_)):
                    p = str(to) + p[len(str(from_)):]
            return Path(p)

        renames = self.get_renames()
        return {
            path: ChangedFile(
                self._inference_state,
                from_path=path,
                to_path=calculate_to_path(path),
                module_node=next(iter(map_)).get_root_node(),
                node_to_str_map=map_
            ) for path, map_ in sorted(self._file_to_node_changes.items())
        }

    def get_renames(self) -> Iterable[Tuple[Path, Path]]:
        """
        Files can be renamed in a refactoring.
        """
        return sorted(self._renames)

    def get_diff(self):
        text = ''
        project_path = self._inference_state.project.path
        for from_, to in self.get_renames():
            text += 'rename from %s\nrename to %s\n' \
                % (from_.relative_to(project_path), to.relative_to(project_path))

        return text + ''.join(f.get_diff() for f in self.get_changed_files().values())

    def apply(self):
        """
        Applies the whole refactoring to the files, which includes renames.
        """
        for f in self.get_changed_files().values():
            f.apply()

        for old, new in self.get_renames():
            old.rename(new)


def _calculate_rename(path, new_name):
    # Renaming a package means renaming its directory, not __init__.py.
    dir_ = path.parent
    if path.name in ('__init__.py', '__init__.pyi'):
        return dir_, dir_.parent.joinpath(new_name)
    return path, dir_.joinpath(new_name + path.suffix)


def rename(inference_state, definitions, new_name):
    """
    Builds a Refactoring that renames all given definitions/references to
    ``new_name`` (module definitions become file renames).
    """
    file_renames = set()
    file_tree_name_map = {}

    if not definitions:
        raise RefactoringError("There is no name under the cursor")

    for d in definitions:
        tree_name = d._name.tree_name
        if d.type == 'module' and tree_name is None:
            p = None if d.module_path is None else Path(d.module_path)
            file_renames.add(_calculate_rename(p, new_name))
        else:
            # This private access is ok in a way. It's not public to
            # protect Jedi users from seeing it.
            if tree_name is not None:
                fmap = file_tree_name_map.setdefault(d.module_path, {})
                fmap[tree_name] = tree_name.prefix + new_name
    return Refactoring(inference_state, file_tree_name_map, file_renames)


def inline(inference_state, names):
    """
    Builds a Refactoring that inlines a single-assignment variable into all
    of its references and removes the assignment statement.
    """
    if not names:
        raise RefactoringError("There is no name under the cursor")
    if any(n.api_type in ('module', 'namespace') for n in names):
        raise RefactoringError("Cannot inline imports, modules or namespaces")
    if any(n.tree_name is None for n in names):
        raise RefactoringError("Cannot inline builtins/extensions")

    definitions = [n for n in names if n.tree_name.is_definition()]
    if len(definitions) == 0:
        raise RefactoringError("No definition found to inline")
    if len(definitions) > 1:
        raise RefactoringError("Cannot inline a name with multiple definitions")
    if len(names) == 1:
        raise RefactoringError("There are no references to this name")

    tree_name = definitions[0].tree_name

    expr_stmt = tree_name.get_definition()
    if expr_stmt.type != 'expr_stmt':
        type_ = dict(
            funcdef='function',
            classdef='class',
        ).get(expr_stmt.type, expr_stmt.type)
        raise RefactoringError("Cannot inline a %s" % type_)

    if len(expr_stmt.get_defined_names(include_setitem=True)) > 1:
        raise RefactoringError("Cannot inline a statement with multiple definitions")
    first_child = expr_stmt.children[1]
    if first_child.type == 'annassign' and len(first_child.children) == 4:
        first_child = first_child.children[2]
    if first_child != '=':
        if first_child.type == 'annassign':
            raise RefactoringError(
                'Cannot inline a statement that is defined by an annotation'
            )
        else:
            raise RefactoringError(
                'Cannot inline a statement with "%s"'
                % first_child.get_code(include_prefix=False)
            )

    rhs = expr_stmt.get_rhs()
    replace_code = rhs.get_code(include_prefix=False)

    references = [n for n in names if not n.tree_name.is_definition()]
    file_to_node_changes = {}
    for name in references:
        tree_name = name.tree_name
        path = name.get_root_context().py__file__()
        s = replace_code
        # Parenthesize if the inlined expression could bind differently in
        # the reference's surrounding expression.
        if rhs.type == 'testlist_star_expr' \
                or tree_name.parent.type in EXPRESSION_PARTS \
                or tree_name.parent.type == 'trailer' \
                and tree_name.parent.get_next_sibling() is not None:
            s = '(' + replace_code + ')'

        of_path = file_to_node_changes.setdefault(path, {})

        n = tree_name
        prefix = n.prefix
        par = n.parent
        if par.type == 'trailer' and par.children[0] == '.':
            prefix = par.parent.children[0].prefix
            n = par
            for some_node in par.parent.children[:par.parent.children.index(par)]:
                of_path[some_node] = ''
        of_path[n] = prefix + s

    path = definitions[0].get_root_context().py__file__()
    changes = file_to_node_changes.setdefault(path, {})
    changes[expr_stmt] = _remove_indent_of_prefix(expr_stmt.get_first_leaf().prefix)
    next_leaf = expr_stmt.get_next_leaf()

    # Most of the time we have to remove the newline at the end of the
    # statement, but if there's a comment we might not need to.
    if next_leaf.prefix.strip(' \t') == '' \
            and (next_leaf.type == 'newline' or next_leaf == ';'):
        changes[next_leaf] = ''
    return Refactoring(inference_state, file_to_node_changes)


def _remove_indent_of_prefix(prefix):
    r"""
    Removes the last indentation of a prefix, e.g. " \n \n " becomes " \n \n".
    """
    return ''.join(split_lines(prefix, keepends=True)[:-1])
# diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/api/refactoring/extract.py b/bundle/jedi-vim/pythonx/jedi/jedi/api/refactoring/extract.py
# new file mode 100644  index 000000000..6e7df7e13  --- /dev/null  +++ b/bundle/jedi-vim/pythonx/jedi/jedi/api/refactoring/extract.py  @@ -0,0 +1,386 @@
from textwrap import dedent

from parso import split_lines

from jedi import debug
from jedi.api.exceptions import RefactoringError
from jedi.api.refactoring import Refactoring, EXPRESSION_PARTS
from jedi.common import indent_block
from jedi.parser_utils import function_is_classmethod, function_is_staticmethod


_DEFINITION_SCOPES = ('suite', 'file_input')
# NOTE(review): identifier misspelling ("EXCTRACTABLE") is upstream's; kept
# to stay in sync with the vendored jedi source.
_VARIABLE_EXCTRACTABLE = EXPRESSION_PARTS + \
    ('atom testlist_star_expr testlist test lambdef lambdef_nocond '
     'keyword name number string fstring').split()


def extract_variable(inference_state, path, module_node, name, pos, until_pos):
    """
    Extracts the marked expression into a new variable ``name`` and returns
    the resulting Refactoring.
    """
    nodes = _find_nodes(module_node, pos, until_pos)
    debug.dbg('Extracting nodes: %s', nodes)

    is_expression, message = _is_expression_with_error(nodes)
    if not is_expression:
        raise RefactoringError(message)

    generated_code = name + ' = ' + _expression_nodes_to_string(nodes)
    file_to_node_changes = {path: _replace(nodes, name, generated_code, pos)}
    return Refactoring(inference_state, file_to_node_changes)


def _is_expression_with_error(nodes):
    """
    Returns a tuple (is_expression, error_string).
    """
    if any(node.type == 'name' and node.is_definition() for node in nodes):
        return False, 'Cannot extract a name that defines something'

    if nodes[0].type not in _VARIABLE_EXCTRACTABLE:
        return False, 'Cannot extract a "%s"' % nodes[0].type
    return True, ''


def _find_nodes(module_node, pos, until_pos):
    """
    Looks up a module and tries to find the appropriate amount of nodes that
    are in there.
    """
    start_node = module_node.get_leaf_for_position(pos, include_prefixes=True)

    if until_pos is None:
        if start_node.type == 'operator':
            next_leaf = start_node.get_next_leaf()
            if next_leaf is not None and next_leaf.start_pos == pos:
                start_node = next_leaf

        if _is_not_extractable_syntax(start_node):
            start_node = start_node.parent

        if start_node.parent.type == 'trailer':
            start_node = start_node.parent.parent
        while start_node.parent.type in EXPRESSION_PARTS:
            start_node = start_node.parent

        nodes = [start_node]
    else:
        # Get the next leaf if we are at the end of a leaf
        if start_node.end_pos == pos:
            next_leaf = start_node.get_next_leaf()
            if next_leaf is not None:
                start_node = next_leaf

        # Some syntax is not extractable, just use its parent
        if _is_not_extractable_syntax(start_node):
            start_node = start_node.parent

        # Find the end
        end_leaf = module_node.get_leaf_for_position(until_pos, include_prefixes=True)
        if end_leaf.start_pos > until_pos:
            end_leaf = end_leaf.get_previous_leaf()
            if end_leaf is None:
                raise RefactoringError('Cannot extract anything from that')

        parent_node = start_node
        while parent_node.end_pos < end_leaf.end_pos:
            parent_node = parent_node.parent

        nodes = _remove_unwanted_expression_nodes(parent_node, pos, until_pos)

    # If the user marks just a return statement, we return the expression
    # instead of the whole statement, because the user obviously wants to
    # extract that part.
    if len(nodes) == 1 and start_node.type in ('return_stmt', 'yield_expr'):
        return [nodes[0].children[1]]
    return nodes


def _replace(nodes, expression_replacement, extracted, pos,
             insert_before_leaf=None, remaining_prefix=None):
    # Now try to replace the nodes found with a variable and move the code
    # before the current statement.
    # (definition continues beyond this chunk)
+ definition = _get_parent_definition(nodes[0]) + if insert_before_leaf is None: + insert_before_leaf = definition.get_first_leaf() + first_node_leaf = nodes[0].get_first_leaf() + + lines = split_lines(insert_before_leaf.prefix, keepends=True) + if first_node_leaf is insert_before_leaf: + if remaining_prefix is not None: + # The remaining prefix has already been calculated. + lines[:-1] = remaining_prefix + lines[-1:-1] = [indent_block(extracted, lines[-1]) + '\n'] + extracted_prefix = ''.join(lines) + + replacement_dct = {} + if first_node_leaf is insert_before_leaf: + replacement_dct[nodes[0]] = extracted_prefix + expression_replacement + else: + if remaining_prefix is None: + p = first_node_leaf.prefix + else: + p = remaining_prefix + _get_indentation(nodes[0]) + replacement_dct[nodes[0]] = p + expression_replacement + replacement_dct[insert_before_leaf] = extracted_prefix + insert_before_leaf.value + + for node in nodes[1:]: + replacement_dct[node] = '' + return replacement_dct + + +def _expression_nodes_to_string(nodes): + return ''.join(n.get_code(include_prefix=i != 0) for i, n in enumerate(nodes)) + + +def _suite_nodes_to_string(nodes, pos): + n = nodes[0] + prefix, part_of_code = _split_prefix_at(n.get_first_leaf(), pos[0] - 1) + code = part_of_code + n.get_code(include_prefix=False) \ + + ''.join(n.get_code() for n in nodes[1:]) + return prefix, code + + +def _split_prefix_at(leaf, until_line): + """ + Returns a tuple of the leaf's prefix, split at the until_line + position. + """ + # second means the second returned part + second_line_count = leaf.start_pos[0] - until_line + lines = split_lines(leaf.prefix, keepends=True) + return ''.join(lines[:-second_line_count]), ''.join(lines[-second_line_count:]) + + +def _get_indentation(node): + return split_lines(node.get_first_leaf().prefix)[-1] + + +def _get_parent_definition(node): + """ + Returns the statement where a node is defined. 
+ """ + while node is not None: + if node.parent.type in _DEFINITION_SCOPES: + return node + node = node.parent + raise NotImplementedError('We should never even get here') + + +def _remove_unwanted_expression_nodes(parent_node, pos, until_pos): + """ + This function makes it so for `1 * 2 + 3` you can extract `2 + 3`, even + though it is not part of the expression. + """ + typ = parent_node.type + is_suite_part = typ in ('suite', 'file_input') + if typ in EXPRESSION_PARTS or is_suite_part: + nodes = parent_node.children + for i, n in enumerate(nodes): + if n.end_pos > pos: + start_index = i + if n.type == 'operator': + start_index -= 1 + break + for i, n in reversed(list(enumerate(nodes))): + if n.start_pos < until_pos: + end_index = i + if n.type == 'operator': + end_index += 1 + + # Something like `not foo or bar` should not be cut after not + for n2 in nodes[i:]: + if _is_not_extractable_syntax(n2): + end_index += 1 + else: + break + break + nodes = nodes[start_index:end_index + 1] + if not is_suite_part: + nodes[0:1] = _remove_unwanted_expression_nodes(nodes[0], pos, until_pos) + nodes[-1:] = _remove_unwanted_expression_nodes(nodes[-1], pos, until_pos) + return nodes + return [parent_node] + + +def _is_not_extractable_syntax(node): + return node.type == 'operator' \ + or node.type == 'keyword' and node.value not in ('None', 'True', 'False') + + +def extract_function(inference_state, path, module_context, name, pos, until_pos): + nodes = _find_nodes(module_context.tree_node, pos, until_pos) + assert len(nodes) + + is_expression, _ = _is_expression_with_error(nodes) + context = module_context.create_context(nodes[0]) + is_bound_method = context.is_bound_method() + params, return_variables = list(_find_inputs_and_outputs(module_context, context, nodes)) + + # Find variables + # Is a class method / method + if context.is_module(): + insert_before_leaf = None # Leaf will be determined later + else: + node = _get_code_insertion_node(context.tree_node, 
is_bound_method) + insert_before_leaf = node.get_first_leaf() + if is_expression: + code_block = 'return ' + _expression_nodes_to_string(nodes) + '\n' + remaining_prefix = None + has_ending_return_stmt = False + else: + has_ending_return_stmt = _is_node_ending_return_stmt(nodes[-1]) + if not has_ending_return_stmt: + # Find the actually used variables (of the defined ones). If none are + # used (e.g. if the range covers the whole function), return the last + # defined variable. + return_variables = list(_find_needed_output_variables( + context, + nodes[0].parent, + nodes[-1].end_pos, + return_variables + )) or [return_variables[-1]] if return_variables else [] + + remaining_prefix, code_block = _suite_nodes_to_string(nodes, pos) + after_leaf = nodes[-1].get_next_leaf() + first, second = _split_prefix_at(after_leaf, until_pos[0]) + code_block += first + + code_block = dedent(code_block) + if not has_ending_return_stmt: + output_var_str = ', '.join(return_variables) + code_block += 'return ' + output_var_str + '\n' + + # Check if we have to raise RefactoringError + _check_for_non_extractables(nodes[:-1] if has_ending_return_stmt else nodes) + + decorator = '' + self_param = None + if is_bound_method: + if not function_is_staticmethod(context.tree_node): + function_param_names = context.get_value().get_param_names() + if len(function_param_names): + self_param = function_param_names[0].string_name + params = [p for p in params if p != self_param] + + if function_is_classmethod(context.tree_node): + decorator = '@classmethod\n' + else: + code_block += '\n' + + function_code = '%sdef %s(%s):\n%s' % ( + decorator, + name, + ', '.join(params if self_param is None else [self_param] + params), + indent_block(code_block) + ) + + function_call = '%s(%s)' % ( + ('' if self_param is None else self_param + '.') + name, + ', '.join(params) + ) + if is_expression: + replacement = function_call + else: + if has_ending_return_stmt: + replacement = 'return ' + function_call + '\n' + 
else: + replacement = output_var_str + ' = ' + function_call + '\n' + + replacement_dct = _replace(nodes, replacement, function_code, pos, + insert_before_leaf, remaining_prefix) + if not is_expression: + replacement_dct[after_leaf] = second + after_leaf.value + file_to_node_changes = {path: replacement_dct} + return Refactoring(inference_state, file_to_node_changes) + + +def _check_for_non_extractables(nodes): + for n in nodes: + try: + children = n.children + except AttributeError: + if n.value == 'return': + raise RefactoringError( + 'Can only extract return statements if they are at the end.') + if n.value == 'yield': + raise RefactoringError('Cannot extract yield statements.') + else: + _check_for_non_extractables(children) + + +def _is_name_input(module_context, names, first, last): + for name in names: + if name.api_type == 'param' or not name.parent_context.is_module(): + if name.get_root_context() is not module_context: + return True + if name.start_pos is None or not (first <= name.start_pos < last): + return True + return False + + +def _find_inputs_and_outputs(module_context, context, nodes): + first = nodes[0].start_pos + last = nodes[-1].end_pos + + inputs = [] + outputs = [] + for name in _find_non_global_names(nodes): + if name.is_definition(): + if name not in outputs: + outputs.append(name.value) + else: + if name.value not in inputs: + name_definitions = context.goto(name, name.start_pos) + if not name_definitions \ + or _is_name_input(module_context, name_definitions, first, last): + inputs.append(name.value) + + # Check if outputs are really needed: + return inputs, outputs + + +def _find_non_global_names(nodes): + for node in nodes: + try: + children = node.children + except AttributeError: + if node.type == 'name': + yield node + else: + # We only want to check foo in foo.bar + if node.type == 'trailer' and node.children[0] == '.': + continue + + yield from _find_non_global_names(children) + + +def _get_code_insertion_node(node, 
is_bound_method): + if not is_bound_method or function_is_staticmethod(node): + while node.parent.type != 'file_input': + node = node.parent + + while node.parent.type in ('async_funcdef', 'decorated', 'async_stmt'): + node = node.parent + return node + + +def _find_needed_output_variables(context, search_node, at_least_pos, return_variables): + """ + Searches everything after at_least_pos in a node and checks if any of the + return_variables are used in there and returns those. + """ + for node in search_node.children: + if node.start_pos < at_least_pos: + continue + + return_variables = set(return_variables) + for name in _find_non_global_names([node]): + if not name.is_definition() and name.value in return_variables: + return_variables.remove(name.value) + yield name.value + + +def _is_node_ending_return_stmt(node): + t = node.type + if t == 'simple_stmt': + return _is_node_ending_return_stmt(node.children[0]) + return t == 'return_stmt' diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/api/replstartup.py b/bundle/jedi-vim/pythonx/jedi/jedi/api/replstartup.py new file mode 100644 index 000000000..e0f23d19b --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/api/replstartup.py @@ -0,0 +1,29 @@ +""" +To use Jedi completion in Python interpreter, add the following in your shell +setup (e.g., ``.bashrc``). This works only on Linux/Mac, because readline is +not available on Windows. If you still want Jedi autocompletion in your REPL, +just use IPython instead:: + + export PYTHONSTARTUP="$(python -m jedi repl)" + +Then you will be able to use Jedi completer in your Python interpreter:: + + $ python + Python 3.9.2+ (default, Jul 20 2020, 22:15:08) + [GCC 4.6.1] on linux2 + Type "help", "copyright", "credits" or "license" for more information. 
+ >>> import os + >>> os.path.join('a', 'b').split().in # doctest: +SKIP + ..dex ..sert + +""" +import jedi.utils +from jedi import __version__ as __jedi_version__ + +print('REPL completion using Jedi %s' % __jedi_version__) +jedi.utils.setup_readline(fuzzy=False) + +del jedi + +# Note: try not to do many things here, as it will contaminate global +# namespace of the interpreter. diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/api/strings.py b/bundle/jedi-vim/pythonx/jedi/jedi/api/strings.py new file mode 100644 index 000000000..f91da25bd --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/api/strings.py @@ -0,0 +1,108 @@ +""" +This module is here for string completions. This means mostly stuff where +strings are returned, like `foo = dict(bar=3); foo["ba` would complete to +`"bar"]`. + +It however does the same for numbers. The difference between string completions +and other completions is mostly that this module doesn't return defined +names in a module, but pretty much an arbitrary string. 
+""" +import re + +from jedi.inference.names import AbstractArbitraryName +from jedi.inference.helpers import infer_call_of_leaf +from jedi.api.classes import Completion +from jedi.parser_utils import cut_value_at_position + +_sentinel = object() + + +class StringName(AbstractArbitraryName): + api_type = 'string' + is_value_name = False + + +def complete_dict(module_context, code_lines, leaf, position, string, fuzzy): + bracket_leaf = leaf + if bracket_leaf != '[': + bracket_leaf = leaf.get_previous_leaf() + + cut_end_quote = '' + if string: + cut_end_quote = get_quote_ending(string, code_lines, position, invert_result=True) + + if bracket_leaf == '[': + if string is None and leaf is not bracket_leaf: + string = cut_value_at_position(leaf, position) + + context = module_context.create_context(bracket_leaf) + before_bracket_leaf = bracket_leaf.get_previous_leaf() + if before_bracket_leaf.type in ('atom', 'trailer', 'name'): + values = infer_call_of_leaf(context, before_bracket_leaf) + return list(_completions_for_dicts( + module_context.inference_state, + values, + '' if string is None else string, + cut_end_quote, + fuzzy=fuzzy, + )) + return [] + + +def _completions_for_dicts(inference_state, dicts, literal_string, cut_end_quote, fuzzy): + for dict_key in sorted(_get_python_keys(dicts), key=lambda x: repr(x)): + dict_key_str = _create_repr_string(literal_string, dict_key) + if dict_key_str.startswith(literal_string): + name = StringName(inference_state, dict_key_str[:-len(cut_end_quote) or None]) + yield Completion( + inference_state, + name, + stack=None, + like_name_length=len(literal_string), + is_fuzzy=fuzzy + ) + + +def _create_repr_string(literal_string, dict_key): + if not isinstance(dict_key, (str, bytes)) or not literal_string: + return repr(dict_key) + + r = repr(dict_key) + prefix, quote = _get_string_prefix_and_quote(literal_string) + if quote is None: + return r + if quote == r[0]: + return prefix + r + return prefix + quote + r[1:-1] + quote + + +def 
_get_python_keys(dicts): + for dct in dicts: + if dct.array_type == 'dict': + for key in dct.get_key_values(): + dict_key = key.get_safe_value(default=_sentinel) + if dict_key is not _sentinel: + yield dict_key + + +def _get_string_prefix_and_quote(string): + match = re.match(r'(\w*)("""|\'{3}|"|\')', string) + if match is None: + return None, None + return match.group(1), match.group(2) + + +def _matches_quote_at_position(code_lines, quote, position): + string = code_lines[position[0] - 1][position[1]:position[1] + len(quote)] + return string == quote + + +def get_quote_ending(string, code_lines, position, invert_result=False): + _, quote = _get_string_prefix_and_quote(string) + if quote is None: + return '' + + # Add a quote only if it's not already there. + if _matches_quote_at_position(code_lines, quote, position) != invert_result: + return '' + return quote diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/cache.py b/bundle/jedi-vim/pythonx/jedi/jedi/cache.py new file mode 100644 index 000000000..1ff452013 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/cache.py @@ -0,0 +1,115 @@ +""" +This caching is very important for speed and memory optimizations. There's +nothing really spectacular, just some decorators. The following cache types are +available: + +- ``time_cache`` can be used to cache something for just a limited time span, + which can be useful if there's user interaction and the user cannot react + faster than a certain time. + +This module is one of the reasons why |jedi| is not thread-safe. As you can see +there are global variables, which are holding the cache information. Some of +these variables are being cleaned after every API usage. 
+""" +import time +from functools import wraps +from typing import Any, Dict, Tuple + +from jedi import settings +from parso.cache import parser_cache + +_time_caches: Dict[str, Dict[Any, Tuple[float, Any]]] = {} + + +def clear_time_caches(delete_all: bool = False) -> None: + """ Jedi caches many things, that should be completed after each completion + finishes. + + :param delete_all: Deletes also the cache that is normally not deleted, + like parser cache, which is important for faster parsing. + """ + global _time_caches + + if delete_all: + for cache in _time_caches.values(): + cache.clear() + parser_cache.clear() + else: + # normally just kill the expired entries, not all + for tc in _time_caches.values(): + # check time_cache for expired entries + for key, (t, value) in list(tc.items()): + if t < time.time(): + # delete expired entries + del tc[key] + + +def signature_time_cache(time_add_setting): + """ + This decorator works as follows: Call it with a setting and after that + use the function with a callable that returns the key. + But: This function is only called if the key is not available. After a + certain amount of time (`time_add_setting`) the cache is invalid. + + If the given key is None, the function will not be cached. 
+ """ + def _temp(key_func): + dct = {} + _time_caches[time_add_setting] = dct + + def wrapper(*args, **kwargs): + generator = key_func(*args, **kwargs) + key = next(generator) + try: + expiry, value = dct[key] + if expiry > time.time(): + return value + except KeyError: + pass + + value = next(generator) + time_add = getattr(settings, time_add_setting) + if key is not None: + dct[key] = time.time() + time_add, value + return value + return wrapper + return _temp + + +def time_cache(seconds): + def decorator(func): + cache = {} + + @wraps(func) + def wrapper(*args, **kwargs): + key = (args, frozenset(kwargs.items())) + try: + created, result = cache[key] + if time.time() < created + seconds: + return result + except KeyError: + pass + result = func(*args, **kwargs) + cache[key] = time.time(), result + return result + + wrapper.clear_cache = lambda: cache.clear() + return wrapper + + return decorator + + +def memoize_method(method): + """A normal memoize function.""" + @wraps(method) + def wrapper(self, *args, **kwargs): + cache_dict = self.__dict__.setdefault('_memoize_method_dct', {}) + dct = cache_dict.setdefault(method, {}) + key = (args, frozenset(kwargs.items())) + try: + return dct[key] + except KeyError: + result = method(self, *args, **kwargs) + dct[key] = result + return result + return wrapper diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/common.py b/bundle/jedi-vim/pythonx/jedi/jedi/common.py new file mode 100644 index 000000000..eb4b49961 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/common.py @@ -0,0 +1,24 @@ +from contextlib import contextmanager + + +@contextmanager +def monkeypatch(obj, attribute_name, new_value): + """ + Like pytest's monkeypatch, but as a value manager. 
+ """ + old_value = getattr(obj, attribute_name) + try: + setattr(obj, attribute_name, new_value) + yield + finally: + setattr(obj, attribute_name, old_value) + + +def indent_block(text, indention=' '): + """This function indents a text block with a default of four spaces.""" + temp = '' + while text and text[-1] == '\n': + temp += text[-1] + text = text[:-1] + lines = text.split('\n') + return '\n'.join(map(lambda s: indention + s, lines)) + temp diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/debug.py b/bundle/jedi-vim/pythonx/jedi/jedi/debug.py new file mode 100644 index 000000000..99a90601e --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/debug.py @@ -0,0 +1,132 @@ +import os +import time +from contextlib import contextmanager +from typing import Callable, Optional + +_inited = False + + +def _lazy_colorama_init(): + """ + Lazily init colorama if necessary, not to screw up stdout if debugging is + not enabled. + + This version of the function does nothing. + """ + + +try: + if os.name == 'nt': + # Does not work on Windows, as pyreadline and colorama interfere + raise ImportError + else: + # Use colorama for nicer console output. + from colorama import Fore, init # type: ignore[import] + from colorama import initialise + + def _lazy_colorama_init(): # noqa: F811 + """ + Lazily init colorama if necessary, not to screw up stdout is + debug not enabled. + + This version of the function does init colorama. + """ + global _inited + if not _inited: + # pytest resets the stream at the end - causes troubles. Since + # after every output the stream is reset automatically we don't + # need this. + initialise.atexit_done = True + try: + init(strip=False) + except Exception: + # Colorama fails with initializing under vim and is buggy in + # version 0.3.6. 
+ pass + _inited = True + +except ImportError: + class Fore: # type: ignore[no-redef] + RED = '' + GREEN = '' + YELLOW = '' + MAGENTA = '' + RESET = '' + BLUE = '' + +NOTICE = object() +WARNING = object() +SPEED = object() + +enable_speed = False +enable_warning = False +enable_notice = False + +# callback, interface: level, str +debug_function: Optional[Callable[[str, str], None]] = None +_debug_indent = 0 +_start_time = time.time() + + +def reset_time(): + global _start_time, _debug_indent + _start_time = time.time() + _debug_indent = 0 + + +def increase_indent(func): + """Decorator for makin """ + def wrapper(*args, **kwargs): + with increase_indent_cm(): + return func(*args, **kwargs) + return wrapper + + +@contextmanager +def increase_indent_cm(title=None, color='MAGENTA'): + global _debug_indent + if title: + dbg('Start: ' + title, color=color) + _debug_indent += 1 + try: + yield + finally: + _debug_indent -= 1 + if title: + dbg('End: ' + title, color=color) + + +def dbg(message, *args, color='GREEN'): + """ Looks at the stack, to see if a debug message should be printed. """ + assert color + + if debug_function and enable_notice: + i = ' ' * _debug_indent + _lazy_colorama_init() + debug_function(color, i + 'dbg: ' + message % tuple(repr(a) for a in args)) + + +def warning(message, *args, format=True): + if debug_function and enable_warning: + i = ' ' * _debug_indent + if format: + message = message % tuple(repr(a) for a in args) + debug_function('RED', i + 'warning: ' + message) + + +def speed(name): + if debug_function and enable_speed: + now = time.time() + i = ' ' * _debug_indent + debug_function('YELLOW', i + 'speed: ' + '%s %s' % (name, now - _start_time)) + + +def print_to_stdout(color, str_out): + """ + The default debug function that prints to standard out. + + :param str color: A string that is an attribute of ``colorama.Fore``. 
+ """ + col = getattr(Fore, color) + _lazy_colorama_init() + print(col + str_out + Fore.RESET) diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/file_io.py b/bundle/jedi-vim/pythonx/jedi/jedi/file_io.py new file mode 100644 index 000000000..ead17335c --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/file_io.py @@ -0,0 +1,83 @@ +import os + +from parso import file_io + + +class AbstractFolderIO: + def __init__(self, path): + self.path = path + + def get_base_name(self): + raise NotImplementedError + + def list(self): + raise NotImplementedError + + def get_file_io(self, name): + raise NotImplementedError + + def get_parent_folder(self): + raise NotImplementedError + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.path) + + +class FolderIO(AbstractFolderIO): + def get_base_name(self): + return os.path.basename(self.path) + + def list(self): + return os.listdir(self.path) + + def get_file_io(self, name): + return FileIO(os.path.join(self.path, name)) + + def get_parent_folder(self): + return FolderIO(os.path.dirname(self.path)) + + def walk(self): + for root, dirs, files in os.walk(self.path): + root_folder_io = FolderIO(root) + original_folder_ios = [FolderIO(os.path.join(root, d)) for d in dirs] + modified_folder_ios = list(original_folder_ios) + yield ( + root_folder_io, + modified_folder_ios, + [FileIO(os.path.join(root, f)) for f in files], + ) + modified_iterator = iter(reversed(modified_folder_ios)) + current = next(modified_iterator, None) + i = len(original_folder_ios) + for folder_io in reversed(original_folder_ios): + i -= 1 # Basically enumerate but reversed + if current is folder_io: + current = next(modified_iterator, None) + else: + del dirs[i] + + +class FileIOFolderMixin: + def get_parent_folder(self): + return FolderIO(os.path.dirname(self.path)) + + +class ZipFileIO(file_io.KnownContentFileIO, FileIOFolderMixin): + """For .zip and .egg archives""" + def __init__(self, path, code, zip_path): + super().__init__(path, code) 
+ self._zip_path = zip_path + + def get_last_modified(self): + try: + return os.path.getmtime(self._zip_path) + except (FileNotFoundError, PermissionError, NotADirectoryError): + return None + + +class FileIO(file_io.FileIO, FileIOFolderMixin): + pass + + +class KnownContentFileIO(file_io.KnownContentFileIO, FileIOFolderMixin): + pass diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/__init__.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/__init__.py new file mode 100644 index 000000000..f4bf9a9c3 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/__init__.py @@ -0,0 +1,198 @@ +""" +Type inference of Python code in |jedi| is based on three assumptions: + +* The code uses as least side effects as possible. Jedi understands certain + list/tuple/set modifications, but there's no guarantee that Jedi detects + everything (list.append in different modules for example). +* No magic is being used: + + - metaclasses + - ``setattr()`` / ``__import__()`` + - writing to ``globals()``, ``locals()``, ``object.__dict__`` +* The programmer is not a total dick, e.g. like `this + `_ :-) + +The actual algorithm is based on a principle I call lazy type inference. That +said, the typical entry point for static analysis is calling +``infer_expr_stmt``. There's separate logic for autocompletion in the API, the +inference_state is all about inferring an expression. + +TODO this paragraph is not what jedi does anymore, it's similar, but not the +same. + +Now you need to understand what follows after ``infer_expr_stmt``. Let's +make an example:: + + import datetime + datetime.date.toda# <-- cursor here + +First of all, this module doesn't care about completion. It really just cares +about ``datetime.date``. At the end of the procedure ``infer_expr_stmt`` will +return the ``date`` class. + +To *visualize* this (simplified): + +- ``InferenceState.infer_expr_stmt`` doesn't do much, because there's no assignment. 
+- ``Context.infer_node`` cares for resolving the dotted path +- ``InferenceState.find_types`` searches for global definitions of datetime, which + it finds in the definition of an import, by scanning the syntax tree. +- Using the import logic, the datetime module is found. +- Now ``find_types`` is called again by ``infer_node`` to find ``date`` + inside the datetime module. + +Now what would happen if we wanted ``datetime.date.foo.bar``? Two more +calls to ``find_types``. However the second call would be ignored, because the +first one would return nothing (there's no foo attribute in ``date``). + +What if the import would contain another ``ExprStmt`` like this:: + + from foo import bar + Date = bar.baz + +Well... You get it. Just another ``infer_expr_stmt`` recursion. It's really +easy. Python can obviously get way more complicated then this. To understand +tuple assignments, list comprehensions and everything else, a lot more code had +to be written. + +Jedi has been tested very well, so you can just start modifying code. It's best +to write your own test first for your "new" feature. Don't be scared of +breaking stuff. As long as the tests pass, you're most likely to be fine. + +I need to mention now that lazy type inference is really good because it +only *inferes* what needs to be *inferred*. All the statements and modules +that are not used are just being ignored. 
+""" +import parso +from jedi.file_io import FileIO + +from jedi import debug +from jedi import settings +from jedi.inference import imports +from jedi.inference import recursion +from jedi.inference.cache import inference_state_function_cache +from jedi.inference import helpers +from jedi.inference.names import TreeNameDefinition +from jedi.inference.base_value import ContextualizedNode, \ + ValueSet, iterate_values +from jedi.inference.value import ClassValue, FunctionValue +from jedi.inference.syntax_tree import infer_expr_stmt, \ + check_tuple_assignments, tree_name_to_values +from jedi.inference.imports import follow_error_node_imports_if_possible +from jedi.plugins import plugin_manager + + +class InferenceState: + def __init__(self, project, environment=None, script_path=None): + if environment is None: + environment = project.get_environment() + self.environment = environment + self.script_path = script_path + self.compiled_subprocess = environment.get_inference_state_subprocess(self) + self.grammar = environment.get_grammar() + + self.latest_grammar = parso.load_grammar(version='3.7') + self.memoize_cache = {} # for memoize decorators + self.module_cache = imports.ModuleCache() # does the job of `sys.modules`. 
+ self.stub_module_cache = {} # Dict[Tuple[str, ...], Optional[ModuleValue]] + self.compiled_cache = {} # see `inference.compiled.create()` + self.inferred_element_counts = {} + self.mixed_cache = {} # see `inference.compiled.mixed._create()` + self.analysis = [] + self.dynamic_params_depth = 0 + self.is_analysis = False + self.project = project + self.access_cache = {} + self.allow_descriptor_getattr = False + self.flow_analysis_enabled = True + + self.reset_recursion_limitations() + + def import_module(self, import_names, sys_path=None, prefer_stubs=True): + return imports.import_module_by_names( + self, import_names, sys_path, prefer_stubs=prefer_stubs) + + @staticmethod + @plugin_manager.decorate() + def execute(value, arguments): + debug.dbg('execute: %s %s', value, arguments) + with debug.increase_indent_cm(): + value_set = value.py__call__(arguments=arguments) + debug.dbg('execute result: %s in %s', value_set, value) + return value_set + + # mypy doesn't suppport decorated propeties (https://github.com/python/mypy/issues/1362) + @property # type: ignore[misc] + @inference_state_function_cache() + def builtins_module(self): + module_name = 'builtins' + builtins_module, = self.import_module((module_name,), sys_path=()) + return builtins_module + + @property # type: ignore[misc] + @inference_state_function_cache() + def typing_module(self): + typing_module, = self.import_module(('typing',)) + return typing_module + + def reset_recursion_limitations(self): + self.recursion_detector = recursion.RecursionDetector() + self.execution_recursion_detector = recursion.ExecutionRecursionDetector(self) + + def get_sys_path(self, **kwargs): + """Convenience function""" + return self.project._get_sys_path(self, **kwargs) + + def infer(self, context, name): + def_ = name.get_definition(import_name_always=True) + if def_ is not None: + type_ = def_.type + is_classdef = type_ == 'classdef' + if is_classdef or type_ == 'funcdef': + if is_classdef: + c = ClassValue(self, 
context, name.parent) + else: + c = FunctionValue.from_context(context, name.parent) + return ValueSet([c]) + + if type_ == 'expr_stmt': + is_simple_name = name.parent.type not in ('power', 'trailer') + if is_simple_name: + return infer_expr_stmt(context, def_, name) + if type_ == 'for_stmt': + container_types = context.infer_node(def_.children[3]) + cn = ContextualizedNode(context, def_.children[3]) + for_types = iterate_values(container_types, cn) + n = TreeNameDefinition(context, name) + return check_tuple_assignments(n, for_types) + if type_ in ('import_from', 'import_name'): + return imports.infer_import(context, name) + if type_ == 'with_stmt': + return tree_name_to_values(self, context, name) + elif type_ == 'param': + return context.py__getattribute__(name.value, position=name.end_pos) + elif type_ == 'namedexpr_test': + return context.infer_node(def_) + else: + result = follow_error_node_imports_if_possible(context, name) + if result is not None: + return result + + return helpers.infer_call_of_leaf(context, name) + + def parse_and_get_code(self, code=None, path=None, + use_latest_grammar=False, file_io=None, **kwargs): + if code is None: + if file_io is None: + file_io = FileIO(path) + code = file_io.read() + # We cannot just use parso, because it doesn't use errors='replace'. 
+ code = parso.python_bytes_to_unicode(code, encoding='utf-8', errors='replace') + + if len(code) > settings._cropped_file_size: + code = code[:settings._cropped_file_size] + + grammar = self.latest_grammar if use_latest_grammar else self.grammar + return grammar.parse(code=code, path=path, file_io=file_io, **kwargs), code + + def parse(self, *args, **kwargs): + return self.parse_and_get_code(*args, **kwargs)[0] diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/analysis.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/analysis.py new file mode 100644 index 000000000..c272a9cb8 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/analysis.py @@ -0,0 +1,213 @@ +""" +Module for statical analysis. +""" +from parso.python import tree + +from jedi import debug +from jedi.inference.helpers import is_string + + +CODES = { + 'attribute-error': (1, AttributeError, 'Potential AttributeError.'), + 'name-error': (2, NameError, 'Potential NameError.'), + 'import-error': (3, ImportError, 'Potential ImportError.'), + 'type-error-too-many-arguments': (4, TypeError, None), + 'type-error-too-few-arguments': (5, TypeError, None), + 'type-error-keyword-argument': (6, TypeError, None), + 'type-error-multiple-values': (7, TypeError, None), + 'type-error-star-star': (8, TypeError, None), + 'type-error-star': (9, TypeError, None), + 'type-error-operation': (10, TypeError, None), + 'type-error-not-iterable': (11, TypeError, None), + 'type-error-isinstance': (12, TypeError, None), + 'type-error-not-subscriptable': (13, TypeError, None), + 'value-error-too-many-values': (14, ValueError, None), + 'value-error-too-few-values': (15, ValueError, None), +} + + +class Error: + def __init__(self, name, module_path, start_pos, message=None): + self.path = module_path + self._start_pos = start_pos + self.name = name + if message is None: + message = CODES[self.name][2] + self.message = message + + @property + def line(self): + return self._start_pos[0] + + @property + def 
column(self): + return self._start_pos[1] + + @property + def code(self): + # The class name start + first = self.__class__.__name__[0] + return first + str(CODES[self.name][0]) + + def __str__(self): + return '%s:%s:%s: %s %s' % (self.path, self.line, self.column, + self.code, self.message) + + def __eq__(self, other): + return (self.path == other.path and self.name == other.name + and self._start_pos == other._start_pos) + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return hash((self.path, self._start_pos, self.name)) + + def __repr__(self): + return '<%s %s: %s@%s,%s>' % (self.__class__.__name__, + self.name, self.path, + self._start_pos[0], self._start_pos[1]) + + +class Warning(Error): + pass + + +def add(node_context, error_name, node, message=None, typ=Error, payload=None): + exception = CODES[error_name][1] + if _check_for_exception_catch(node_context, node, exception, payload): + return + + # TODO this path is probably not right + module_context = node_context.get_root_context() + module_path = module_context.py__file__() + issue_instance = typ(error_name, module_path, node.start_pos, message) + debug.warning(str(issue_instance), format=False) + node_context.inference_state.analysis.append(issue_instance) + return issue_instance + + +def _check_for_setattr(instance): + """ + Check if there's any setattr method inside an instance. If so, return True. + """ + module = instance.get_root_context() + node = module.tree_node + if node is None: + # If it's a compiled module or doesn't have a tree_node + return False + + try: + stmt_names = node.get_used_names()['setattr'] + except KeyError: + return False + + return any(node.start_pos < n.start_pos < node.end_pos + # Check if it's a function called setattr. + and not (n.parent.type == 'funcdef' and n.parent.name == n) + for n in stmt_names) + + +def add_attribute_error(name_context, lookup_value, name): + message = ('AttributeError: %s has no attribute %s.' 
% (lookup_value, name)) + # Check for __getattr__/__getattribute__ existance and issue a warning + # instead of an error, if that happens. + typ = Error + if lookup_value.is_instance() and not lookup_value.is_compiled(): + # TODO maybe make a warning for __getattr__/__getattribute__ + + if _check_for_setattr(lookup_value): + typ = Warning + + payload = lookup_value, name + add(name_context, 'attribute-error', name, message, typ, payload) + + +def _check_for_exception_catch(node_context, jedi_name, exception, payload=None): + """ + Checks if a jedi object (e.g. `Statement`) sits inside a try/catch and + doesn't count as an error (if equal to `exception`). + Also checks `hasattr` for AttributeErrors and uses the `payload` to compare + it. + Returns True if the exception was catched. + """ + def check_match(cls, exception): + if not cls.is_class(): + return False + + for python_cls in exception.mro(): + if cls.py__name__() == python_cls.__name__ \ + and cls.parent_context.is_builtins_module(): + return True + return False + + def check_try_for_except(obj, exception): + # Only nodes in try + iterator = iter(obj.children) + for branch_type in iterator: + next(iterator) # The colon + suite = next(iterator) + if branch_type == 'try' \ + and not (branch_type.start_pos < jedi_name.start_pos <= suite.end_pos): + return False + + for node in obj.get_except_clause_tests(): + if node is None: + return True # An exception block that catches everything. 
+ else: + except_classes = node_context.infer_node(node) + for cls in except_classes: + from jedi.inference.value import iterable + if isinstance(cls, iterable.Sequence) and \ + cls.array_type == 'tuple': + # multiple exceptions + for lazy_value in cls.py__iter__(): + for typ in lazy_value.infer(): + if check_match(typ, exception): + return True + else: + if check_match(cls, exception): + return True + + def check_hasattr(node, suite): + try: + assert suite.start_pos <= jedi_name.start_pos < suite.end_pos + assert node.type in ('power', 'atom_expr') + base = node.children[0] + assert base.type == 'name' and base.value == 'hasattr' + trailer = node.children[1] + assert trailer.type == 'trailer' + arglist = trailer.children[1] + assert arglist.type == 'arglist' + from jedi.inference.arguments import TreeArguments + args = TreeArguments(node_context.inference_state, node_context, arglist) + unpacked_args = list(args.unpack()) + # Arguments should be very simple + assert len(unpacked_args) == 2 + + # Check name + key, lazy_value = unpacked_args[1] + names = list(lazy_value.infer()) + assert len(names) == 1 and is_string(names[0]) + assert names[0].get_safe_value() == payload[1].value + + # Check objects + key, lazy_value = unpacked_args[0] + objects = lazy_value.infer() + return payload[0] in objects + except AssertionError: + return False + + obj = jedi_name + while obj is not None and not isinstance(obj, (tree.Function, tree.Class)): + if isinstance(obj, tree.Flow): + # try/except catch check + if obj.type == 'try_stmt' and check_try_for_except(obj, exception): + return True + # hasattr check + if exception == AttributeError and obj.type in ('if_stmt', 'while_stmt'): + if check_hasattr(obj.children[1], obj.children[3]): + return True + obj = obj.parent + + return False diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/arguments.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/arguments.py new file mode 100644 index 000000000..8602f494d --- /dev/null +++ 
b/bundle/jedi-vim/pythonx/jedi/jedi/inference/arguments.py @@ -0,0 +1,335 @@ +import re +from itertools import zip_longest + +from parso.python import tree + +from jedi import debug +from jedi.inference.utils import PushBackIterator +from jedi.inference import analysis +from jedi.inference.lazy_value import LazyKnownValue, LazyKnownValues, \ + LazyTreeValue, get_merged_lazy_value +from jedi.inference.names import ParamName, TreeNameDefinition, AnonymousParamName +from jedi.inference.base_value import NO_VALUES, ValueSet, ContextualizedNode +from jedi.inference.value import iterable +from jedi.inference.cache import inference_state_as_method_param_cache + + +def try_iter_content(types, depth=0): + """Helper method for static analysis.""" + if depth > 10: + # It's possible that a loop has references on itself (especially with + # CompiledValue). Therefore don't loop infinitely. + return + + for typ in types: + try: + f = typ.py__iter__ + except AttributeError: + pass + else: + for lazy_value in f(): + try_iter_content(lazy_value.infer(), depth + 1) + + +class ParamIssue(Exception): + pass + + +def repack_with_argument_clinic(clinic_string): + """ + Transforms a function or method with arguments to the signature that is + given as an argument clinic notation. 
+ + Argument clinic is part of CPython and used for all the functions that are + implemented in C (Python 3.7): + + str.split.__text_signature__ + # Results in: '($self, /, sep=None, maxsplit=-1)' + """ + def decorator(func): + def wrapper(value, arguments): + try: + args = tuple(iterate_argument_clinic( + value.inference_state, + arguments, + clinic_string, + )) + except ParamIssue: + return NO_VALUES + else: + return func(value, *args) + + return wrapper + return decorator + + +def iterate_argument_clinic(inference_state, arguments, clinic_string): + """Uses a list with argument clinic information (see PEP 436).""" + clinic_args = list(_parse_argument_clinic(clinic_string)) + + iterator = PushBackIterator(arguments.unpack()) + for i, (name, optional, allow_kwargs, stars) in enumerate(clinic_args): + if stars == 1: + lazy_values = [] + for key, argument in iterator: + if key is not None: + iterator.push_back((key, argument)) + break + + lazy_values.append(argument) + yield ValueSet([iterable.FakeTuple(inference_state, lazy_values)]) + lazy_values + continue + elif stars == 2: + raise NotImplementedError() + key, argument = next(iterator, (None, None)) + if key is not None: + debug.warning('Keyword arguments in argument clinic are currently not supported.') + raise ParamIssue + if argument is None and not optional: + debug.warning('TypeError: %s expected at least %s arguments, got %s', + name, len(clinic_args), i) + raise ParamIssue + + value_set = NO_VALUES if argument is None else argument.infer() + + if not value_set and not optional: + # For the stdlib we always want values. If we don't get them, + # that's ok, maybe something is too hard to resolve, however, + # we will not proceed with the type inference of that function. 
+ debug.warning('argument_clinic "%s" not resolvable.', name) + raise ParamIssue + yield value_set + + +def _parse_argument_clinic(string): + allow_kwargs = False + optional = False + while string: + # Optional arguments have to begin with a bracket. And should always be + # at the end of the arguments. This is therefore not a proper argument + # clinic implementation. `range()` for exmple allows an optional start + # value at the beginning. + match = re.match(r'(?:(?:(\[),? ?|, ?|)(\**\w+)|, ?/)\]*', string) + string = string[len(match.group(0)):] + if not match.group(2): # A slash -> allow named arguments + allow_kwargs = True + continue + optional = optional or bool(match.group(1)) + word = match.group(2) + stars = word.count('*') + word = word[stars:] + yield (word, optional, allow_kwargs, stars) + if stars: + allow_kwargs = True + + +class _AbstractArgumentsMixin: + def unpack(self, funcdef=None): + raise NotImplementedError + + def get_calling_nodes(self): + return [] + + +class AbstractArguments(_AbstractArgumentsMixin): + context = None + argument_node = None + trailer = None + + +def unpack_arglist(arglist): + if arglist is None: + return + + if arglist.type != 'arglist' and not ( + arglist.type == 'argument' and arglist.children[0] in ('*', '**')): + yield 0, arglist + return + + iterator = iter(arglist.children) + for child in iterator: + if child == ',': + continue + elif child in ('*', '**'): + c = next(iterator, None) + assert c is not None + yield len(child.value), c + elif child.type == 'argument' and \ + child.children[0] in ('*', '**'): + assert len(child.children) == 2 + yield len(child.children[0].value), child.children[1] + else: + yield 0, child + + +class TreeArguments(AbstractArguments): + def __init__(self, inference_state, context, argument_node, trailer=None): + """ + :param argument_node: May be an argument_node or a list of nodes. 
+ """ + self.argument_node = argument_node + self.context = context + self._inference_state = inference_state + self.trailer = trailer # Can be None, e.g. in a class definition. + + @classmethod + @inference_state_as_method_param_cache() + def create_cached(cls, *args, **kwargs): + return cls(*args, **kwargs) + + def unpack(self, funcdef=None): + named_args = [] + for star_count, el in unpack_arglist(self.argument_node): + if star_count == 1: + arrays = self.context.infer_node(el) + iterators = [_iterate_star_args(self.context, a, el, funcdef) + for a in arrays] + for values in list(zip_longest(*iterators)): + yield None, get_merged_lazy_value( + [v for v in values if v is not None] + ) + elif star_count == 2: + arrays = self.context.infer_node(el) + for dct in arrays: + yield from _star_star_dict(self.context, dct, el, funcdef) + else: + if el.type == 'argument': + c = el.children + if len(c) == 3: # Keyword argument. + named_args.append((c[0].value, LazyTreeValue(self.context, c[2]),)) + else: # Generator comprehension. + # Include the brackets with the parent. + sync_comp_for = el.children[1] + if sync_comp_for.type == 'comp_for': + sync_comp_for = sync_comp_for.children[1] + comp = iterable.GeneratorComprehension( + self._inference_state, + defining_context=self.context, + sync_comp_for_node=sync_comp_for, + entry_node=el.children[0], + ) + yield None, LazyKnownValue(comp) + else: + yield None, LazyTreeValue(self.context, el) + + # Reordering arguments is necessary, because star args sometimes appear + # after named argument, but in the actual order it's prepended. + yield from named_args + + def _as_tree_tuple_objects(self): + for star_count, argument in unpack_arglist(self.argument_node): + default = None + if argument.type == 'argument': + if len(argument.children) == 3: # Keyword argument. 
+ argument, default = argument.children[::2] + yield argument, default, star_count + + def iter_calling_names_with_star(self): + for name, default, star_count in self._as_tree_tuple_objects(): + # TODO this function is a bit strange. probably refactor? + if not star_count or not isinstance(name, tree.Name): + continue + + yield TreeNameDefinition(self.context, name) + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.argument_node) + + def get_calling_nodes(self): + old_arguments_list = [] + arguments = self + + while arguments not in old_arguments_list: + if not isinstance(arguments, TreeArguments): + break + + old_arguments_list.append(arguments) + for calling_name in reversed(list(arguments.iter_calling_names_with_star())): + names = calling_name.goto() + if len(names) != 1: + break + if isinstance(names[0], AnonymousParamName): + # Dynamic parameters should not have calling nodes, because + # they are dynamic and extremely random. + return [] + if not isinstance(names[0], ParamName): + break + executed_param_name = names[0].get_executed_param_name() + arguments = executed_param_name.arguments + break + + if arguments.argument_node is not None: + return [ContextualizedNode(arguments.context, arguments.argument_node)] + if arguments.trailer is not None: + return [ContextualizedNode(arguments.context, arguments.trailer)] + return [] + + +class ValuesArguments(AbstractArguments): + def __init__(self, values_list): + self._values_list = values_list + + def unpack(self, funcdef=None): + for values in self._values_list: + yield None, LazyKnownValues(values) + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self._values_list) + + +class TreeArgumentsWrapper(_AbstractArgumentsMixin): + def __init__(self, arguments): + self._wrapped_arguments = arguments + + @property + def context(self): + return self._wrapped_arguments.context + + @property + def argument_node(self): + return self._wrapped_arguments.argument_node + + 
@property + def trailer(self): + return self._wrapped_arguments.trailer + + def unpack(self, func=None): + raise NotImplementedError + + def get_calling_nodes(self): + return self._wrapped_arguments.get_calling_nodes() + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self._wrapped_arguments) + + +def _iterate_star_args(context, array, input_node, funcdef=None): + if not array.py__getattribute__('__iter__'): + if funcdef is not None: + # TODO this funcdef should not be needed. + m = "TypeError: %s() argument after * must be a sequence, not %s" \ + % (funcdef.name.value, array) + analysis.add(context, 'type-error-star', input_node, message=m) + try: + iter_ = array.py__iter__ + except AttributeError: + pass + else: + yield from iter_() + + +def _star_star_dict(context, array, input_node, funcdef): + from jedi.inference.value.instance import CompiledInstance + if isinstance(array, CompiledInstance) and array.name.string_name == 'dict': + # For now ignore this case. In the future add proper iterators and just + # make one call without crazy isinstance checks. + return {} + elif isinstance(array, iterable.Sequence) and array.array_type == 'dict': + return array.exact_key_items() + else: + if funcdef is not None: + m = "TypeError: %s argument after ** must be a mapping, not %s" \ + % (funcdef.name.value, array) + analysis.add(context, 'type-error-star-star', input_node, message=m) + return {} diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/base_value.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/base_value.py new file mode 100644 index 000000000..31b729377 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/base_value.py @@ -0,0 +1,558 @@ +""" +Values are the "values" that Python would return. However Values are at the +same time also the "values" that a user is currently sitting in. + +A ValueSet is typically used to specify the return of a function or any other +static analysis operation. 
In jedi there are always multiple returns and not +just one. +""" +from functools import reduce +from operator import add +from itertools import zip_longest + +from parso.python.tree import Name + +from jedi import debug +from jedi.parser_utils import clean_scope_docstring +from jedi.inference.helpers import SimpleGetItemNotFound +from jedi.inference.utils import safe_property +from jedi.inference.cache import inference_state_as_method_param_cache +from jedi.cache import memoize_method + +sentinel = object() + + +class HasNoContext(Exception): + pass + + +class HelperValueMixin: + def get_root_context(self): + value = self + if value.parent_context is None: + return value.as_context() + + while True: + if value.parent_context is None: + return value + value = value.parent_context + + def execute(self, arguments): + return self.inference_state.execute(self, arguments=arguments) + + def execute_with_values(self, *value_list): + from jedi.inference.arguments import ValuesArguments + arguments = ValuesArguments([ValueSet([value]) for value in value_list]) + return self.inference_state.execute(self, arguments) + + def execute_annotation(self): + return self.execute_with_values() + + def gather_annotation_classes(self): + return ValueSet([self]) + + def merge_types_of_iterate(self, contextualized_node=None, is_async=False): + return ValueSet.from_sets( + lazy_value.infer() + for lazy_value in self.iterate(contextualized_node, is_async) + ) + + def _get_value_filters(self, name_or_str): + origin_scope = name_or_str if isinstance(name_or_str, Name) else None + yield from self.get_filters(origin_scope=origin_scope) + # This covers the case where a stub files are incomplete. 
+ if self.is_stub(): + from jedi.inference.gradual.conversion import convert_values + for c in convert_values(ValueSet({self})): + yield from c.get_filters() + + def goto(self, name_or_str, name_context=None, analysis_errors=True): + from jedi.inference import finder + filters = self._get_value_filters(name_or_str) + names = finder.filter_name(filters, name_or_str) + debug.dbg('context.goto %s in (%s): %s', name_or_str, self, names) + return names + + def py__getattribute__(self, name_or_str, name_context=None, position=None, + analysis_errors=True): + """ + :param position: Position of the last statement -> tuple of line, column + """ + if name_context is None: + name_context = self + names = self.goto(name_or_str, name_context, analysis_errors) + values = ValueSet.from_sets(name.infer() for name in names) + if not values: + n = name_or_str.value if isinstance(name_or_str, Name) else name_or_str + values = self.py__getattribute__alternatives(n) + + if not names and not values and analysis_errors: + if isinstance(name_or_str, Name): + from jedi.inference import analysis + analysis.add_attribute_error( + name_context, self, name_or_str) + debug.dbg('context.names_to_types: %s -> %s', names, values) + return values + + def py__await__(self): + await_value_set = self.py__getattribute__("__await__") + if not await_value_set: + debug.warning('Tried to run __await__ on value %s', self) + return await_value_set.execute_with_values() + + def py__name__(self): + return self.name.string_name + + def iterate(self, contextualized_node=None, is_async=False): + debug.dbg('iterate %s', self) + if is_async: + from jedi.inference.lazy_value import LazyKnownValues + # TODO if no __aiter__ values are there, error should be: + # TypeError: 'async for' requires an object with __aiter__ method, got int + return iter([ + LazyKnownValues( + self.py__getattribute__('__aiter__').execute_with_values() + .py__getattribute__('__anext__').execute_with_values() + 
.py__getattribute__('__await__').execute_with_values() + .py__stop_iteration_returns() + ) # noqa: E124 + ]) + return self.py__iter__(contextualized_node) + + def is_sub_class_of(self, class_value): + with debug.increase_indent_cm('subclass matching of %s <=> %s' % (self, class_value), + color='BLUE'): + for cls in self.py__mro__(): + if cls.is_same_class(class_value): + debug.dbg('matched subclass True', color='BLUE') + return True + debug.dbg('matched subclass False', color='BLUE') + return False + + def is_same_class(self, class2): + # Class matching should prefer comparisons that are not this function. + if type(class2).is_same_class != HelperValueMixin.is_same_class: + return class2.is_same_class(self) + return self == class2 + + @memoize_method + def as_context(self, *args, **kwargs): + return self._as_context(*args, **kwargs) + + +class Value(HelperValueMixin): + """ + To be implemented by subclasses. + """ + tree_node = None + # Possible values: None, tuple, list, dict and set. Here to deal with these + # very important containers. + array_type = None + api_type = 'not_defined_please_report_bug' + + def __init__(self, inference_state, parent_context=None): + self.inference_state = inference_state + self.parent_context = parent_context + + def py__getitem__(self, index_value_set, contextualized_node): + from jedi.inference import analysis + # TODO this value is probably not right. 
+ analysis.add( + contextualized_node.context, + 'type-error-not-subscriptable', + contextualized_node.node, + message="TypeError: '%s' object is not subscriptable" % self + ) + return NO_VALUES + + def py__simple_getitem__(self, index): + raise SimpleGetItemNotFound + + def py__iter__(self, contextualized_node=None): + if contextualized_node is not None: + from jedi.inference import analysis + analysis.add( + contextualized_node.context, + 'type-error-not-iterable', + contextualized_node.node, + message="TypeError: '%s' object is not iterable" % self) + return iter([]) + + def py__next__(self, contextualized_node=None): + return self.py__iter__(contextualized_node) + + def get_signatures(self): + return [] + + def is_class(self): + return False + + def is_class_mixin(self): + return False + + def is_instance(self): + return False + + def is_function(self): + return False + + def is_module(self): + return False + + def is_namespace(self): + return False + + def is_compiled(self): + return False + + def is_bound_method(self): + return False + + def is_builtins_module(self): + return False + + def py__bool__(self): + """ + Since Wrapper is a super class for classes, functions and modules, + the return value will always be true. 
+ """ + return True + + def py__doc__(self): + try: + self.tree_node.get_doc_node + except AttributeError: + return '' + else: + return clean_scope_docstring(self.tree_node) + + def get_safe_value(self, default=sentinel): + if default is sentinel: + raise ValueError("There exists no safe value for value %s" % self) + return default + + def execute_operation(self, other, operator): + debug.warning("%s not possible between %s and %s", operator, self, other) + return NO_VALUES + + def py__call__(self, arguments): + debug.warning("no execution possible %s", self) + return NO_VALUES + + def py__stop_iteration_returns(self): + debug.warning("Not possible to return the stop iterations of %s", self) + return NO_VALUES + + def py__getattribute__alternatives(self, name_or_str): + """ + For now a way to add values in cases like __getattr__. + """ + return NO_VALUES + + def py__get__(self, instance, class_value): + debug.warning("No __get__ defined on %s", self) + return ValueSet([self]) + + def py__get__on_class(self, calling_instance, instance, class_value): + return NotImplemented + + def get_qualified_names(self): + # Returns Optional[Tuple[str, ...]] + return None + + def is_stub(self): + # The root value knows if it's a stub or not. + return self.parent_context.is_stub() + + def _as_context(self): + raise HasNoContext + + @property + def name(self): + raise NotImplementedError + + def get_type_hint(self, add_class_info=True): + return None + + def infer_type_vars(self, value_set): + """ + When the current instance represents a type annotation, this method + tries to find information about undefined type vars and returns a dict + from type var name to value set. + + This is for example important to understand what `iter([1])` returns. + According to typeshed, `iter` returns an `Iterator[_T]`: + + def iter(iterable: Iterable[_T]) -> Iterator[_T]: ... + + This functions would generate `int` for `_T` in this case, because it + unpacks the `Iterable`. 
+ + Parameters + ---------- + + `self`: represents the annotation of the current parameter to infer the + value for. In the above example, this would initially be the + `Iterable[_T]` of the `iterable` parameter and then, when recursing, + just the `_T` generic parameter. + + `value_set`: represents the actual argument passed to the parameter + we're inferrined for, or (for recursive calls) their types. In the + above example this would first be the representation of the list + `[1]` and then, when recursing, just of `1`. + """ + return {} + + +def iterate_values(values, contextualized_node=None, is_async=False): + """ + Calls `iterate`, on all values but ignores the ordering and just returns + all values that the iterate functions yield. + """ + return ValueSet.from_sets( + lazy_value.infer() + for lazy_value in values.iterate(contextualized_node, is_async=is_async) + ) + + +class _ValueWrapperBase(HelperValueMixin): + @safe_property + def name(self): + from jedi.inference.names import ValueName + wrapped_name = self._wrapped_value.name + if wrapped_name.tree_name is not None: + return ValueName(self, wrapped_name.tree_name) + else: + from jedi.inference.compiled import CompiledValueName + return CompiledValueName(self, wrapped_name.string_name) + + @classmethod + @inference_state_as_method_param_cache() + def create_cached(cls, inference_state, *args, **kwargs): + return cls(*args, **kwargs) + + def __getattr__(self, name): + assert name != '_wrapped_value', 'Problem with _get_wrapped_value' + return getattr(self._wrapped_value, name) + + +class LazyValueWrapper(_ValueWrapperBase): + @safe_property + @memoize_method + def _wrapped_value(self): + with debug.increase_indent_cm('Resolve lazy value wrapper'): + return self._get_wrapped_value() + + def __repr__(self): + return '<%s>' % (self.__class__.__name__) + + def _get_wrapped_value(self): + raise NotImplementedError + + +class ValueWrapper(_ValueWrapperBase): + def __init__(self, wrapped_value): + 
self._wrapped_value = wrapped_value + + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, self._wrapped_value) + + +class TreeValue(Value): + def __init__(self, inference_state, parent_context, tree_node): + super().__init__(inference_state, parent_context) + self.tree_node = tree_node + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.tree_node) + + +class ContextualizedNode: + def __init__(self, context, node): + self.context = context + self.node = node + + def get_root_context(self): + return self.context.get_root_context() + + def infer(self): + return self.context.infer_node(self.node) + + def __repr__(self): + return '<%s: %s in %s>' % (self.__class__.__name__, self.node, self.context) + + +def _getitem(value, index_values, contextualized_node): + # The actual getitem call. + result = NO_VALUES + unused_values = set() + for index_value in index_values: + index = index_value.get_safe_value(default=None) + if type(index) in (float, int, str, slice, bytes): + try: + result |= value.py__simple_getitem__(index) + continue + except SimpleGetItemNotFound: + pass + + unused_values.add(index_value) + + # The index was somehow not good enough or simply a wrong type. + # Therefore we now iterate through all the values and just take + # all results. + if unused_values or not index_values: + result |= value.py__getitem__( + ValueSet(unused_values), + contextualized_node + ) + debug.dbg('py__getitem__ result: %s', result) + return result + + +class ValueSet: + def __init__(self, iterable): + self._set = frozenset(iterable) + for value in iterable: + assert not isinstance(value, ValueSet) + + @classmethod + def _from_frozen_set(cls, frozenset_): + self = cls.__new__(cls) + self._set = frozenset_ + return self + + @classmethod + def from_sets(cls, sets): + """ + Used to work with an iterable of set. 
+ """ + aggregated = set() + for set_ in sets: + if isinstance(set_, ValueSet): + aggregated |= set_._set + else: + aggregated |= frozenset(set_) + return cls._from_frozen_set(frozenset(aggregated)) + + def __or__(self, other): + return self._from_frozen_set(self._set | other._set) + + def __and__(self, other): + return self._from_frozen_set(self._set & other._set) + + def __iter__(self): + return iter(self._set) + + def __bool__(self): + return bool(self._set) + + def __len__(self): + return len(self._set) + + def __repr__(self): + return 'S{%s}' % (', '.join(str(s) for s in self._set)) + + def filter(self, filter_func): + return self.__class__(filter(filter_func, self._set)) + + def __getattr__(self, name): + def mapper(*args, **kwargs): + return self.from_sets( + getattr(value, name)(*args, **kwargs) + for value in self._set + ) + return mapper + + def __eq__(self, other): + return self._set == other._set + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return hash(self._set) + + def py__class__(self): + return ValueSet(c.py__class__() for c in self._set) + + def iterate(self, contextualized_node=None, is_async=False): + from jedi.inference.lazy_value import get_merged_lazy_value + type_iters = [c.iterate(contextualized_node, is_async=is_async) for c in self._set] + for lazy_values in zip_longest(*type_iters): + yield get_merged_lazy_value( + [l for l in lazy_values if l is not None] + ) + + def execute(self, arguments): + return ValueSet.from_sets(c.inference_state.execute(c, arguments) for c in self._set) + + def execute_with_values(self, *args, **kwargs): + return ValueSet.from_sets(c.execute_with_values(*args, **kwargs) for c in self._set) + + def goto(self, *args, **kwargs): + return reduce(add, [c.goto(*args, **kwargs) for c in self._set], []) + + def py__getattribute__(self, *args, **kwargs): + return ValueSet.from_sets(c.py__getattribute__(*args, **kwargs) for c in self._set) + + def get_item(self, *args, **kwargs): 
+ return ValueSet.from_sets(_getitem(c, *args, **kwargs) for c in self._set) + + def try_merge(self, function_name): + value_set = self.__class__([]) + for c in self._set: + try: + method = getattr(c, function_name) + except AttributeError: + pass + else: + value_set |= method() + return value_set + + def gather_annotation_classes(self): + return ValueSet.from_sets([c.gather_annotation_classes() for c in self._set]) + + def get_signatures(self): + return [sig for c in self._set for sig in c.get_signatures()] + + def get_type_hint(self, add_class_info=True): + t = [v.get_type_hint(add_class_info=add_class_info) for v in self._set] + type_hints = sorted(filter(None, t)) + if len(type_hints) == 1: + return type_hints[0] + + optional = 'None' in type_hints + if optional: + type_hints.remove('None') + + if len(type_hints) == 0: + return None + elif len(type_hints) == 1: + s = type_hints[0] + else: + s = 'Union[%s]' % ', '.join(type_hints) + if optional: + s = 'Optional[%s]' % s + return s + + def infer_type_vars(self, value_set): + # Circular + from jedi.inference.gradual.annotation import merge_type_var_dicts + + type_var_dict = {} + for value in self._set: + merge_type_var_dicts( + type_var_dict, + value.infer_type_vars(value_set), + ) + return type_var_dict + + +NO_VALUES = ValueSet([]) + + +def iterator_to_value_set(func): + def wrapper(*args, **kwargs): + return ValueSet(func(*args, **kwargs)) + + return wrapper diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/cache.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/cache.py new file mode 100644 index 000000000..8360a42a4 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/cache.py @@ -0,0 +1,126 @@ +""" +- the popular ``_memoize_default`` works like a typical memoize and returns the + default otherwise. +- ``CachedMetaClass`` uses ``_memoize_default`` to do the same with classes. 
+""" +from functools import wraps + +from jedi import debug + +_NO_DEFAULT = object() +_RECURSION_SENTINEL = object() + + +def _memoize_default(default=_NO_DEFAULT, inference_state_is_first_arg=False, + second_arg_is_inference_state=False): + """ This is a typical memoization decorator, BUT there is one difference: + To prevent recursion it sets defaults. + + Preventing recursion is in this case the much bigger use than speed. I + don't think, that there is a big speed difference, but there are many cases + where recursion could happen (think about a = b; b = a). + """ + def func(function): + def wrapper(obj, *args, **kwargs): + # TODO These checks are kind of ugly and slow. + if inference_state_is_first_arg: + cache = obj.memoize_cache + elif second_arg_is_inference_state: + cache = args[0].memoize_cache # needed for meta classes + else: + cache = obj.inference_state.memoize_cache + + try: + memo = cache[function] + except KeyError: + cache[function] = memo = {} + + key = (obj, args, frozenset(kwargs.items())) + if key in memo: + return memo[key] + else: + if default is not _NO_DEFAULT: + memo[key] = default + rv = function(obj, *args, **kwargs) + memo[key] = rv + return rv + return wrapper + + return func + + +def inference_state_function_cache(default=_NO_DEFAULT): + def decorator(func): + return _memoize_default(default=default, inference_state_is_first_arg=True)(func) + + return decorator + + +def inference_state_method_cache(default=_NO_DEFAULT): + def decorator(func): + return _memoize_default(default=default)(func) + + return decorator + + +def inference_state_as_method_param_cache(): + def decorator(call): + return _memoize_default(second_arg_is_inference_state=True)(call) + + return decorator + + +class CachedMetaClass(type): + """ + This is basically almost the same than the decorator above, it just caches + class initializations. Either you do it this way or with decorators, but + with decorators you lose class access (isinstance, etc). 
+ """ + @inference_state_as_method_param_cache() + def __call__(self, *args, **kwargs): + return super().__call__(*args, **kwargs) + + +def inference_state_method_generator_cache(): + """ + This is a special memoizer. It memoizes generators and also checks for + recursion errors and returns no further iterator elemends in that case. + """ + def func(function): + @wraps(function) + def wrapper(obj, *args, **kwargs): + cache = obj.inference_state.memoize_cache + try: + memo = cache[function] + except KeyError: + cache[function] = memo = {} + + key = (obj, args, frozenset(kwargs.items())) + + if key in memo: + actual_generator, cached_lst = memo[key] + else: + actual_generator = function(obj, *args, **kwargs) + cached_lst = [] + memo[key] = actual_generator, cached_lst + + i = 0 + while True: + try: + next_element = cached_lst[i] + if next_element is _RECURSION_SENTINEL: + debug.warning('Found a generator recursion for %s' % obj) + # This means we have hit a recursion. + return + except IndexError: + cached_lst.append(_RECURSION_SENTINEL) + next_element = next(actual_generator, None) + if next_element is None: + cached_lst.pop() + return + cached_lst[-1] = next_element + yield next_element + i += 1 + return wrapper + + return func diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/compiled/__init__.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/compiled/__init__.py new file mode 100644 index 000000000..09ac19f97 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/compiled/__init__.py @@ -0,0 +1,70 @@ +# This file also re-exports symbols for wider use. We configure mypy and flake8 +# to be aware that this file does this. 
+ +from jedi.inference.compiled.value import CompiledValue, CompiledName, \ + CompiledValueFilter, CompiledValueName, create_from_access_path +from jedi.inference.base_value import LazyValueWrapper + + +def builtin_from_name(inference_state, string): + typing_builtins_module = inference_state.builtins_module + if string in ('None', 'True', 'False'): + builtins, = typing_builtins_module.non_stub_value_set + filter_ = next(builtins.get_filters()) + else: + filter_ = next(typing_builtins_module.get_filters()) + name, = filter_.get(string) + value, = name.infer() + return value + + +class ExactValue(LazyValueWrapper): + """ + This class represents exact values, that makes operations like additions + and exact boolean values possible, while still being a "normal" stub. + """ + def __init__(self, compiled_value): + self.inference_state = compiled_value.inference_state + self._compiled_value = compiled_value + + def __getattribute__(self, name): + if name in ('get_safe_value', 'execute_operation', 'access_handle', + 'negate', 'py__bool__', 'is_compiled'): + return getattr(self._compiled_value, name) + return super().__getattribute__(name) + + def _get_wrapped_value(self): + instance, = builtin_from_name( + self.inference_state, self._compiled_value.name.string_name).execute_with_values() + return instance + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self._compiled_value) + + +def create_simple_object(inference_state, obj): + """ + Only allows creations of objects that are easily picklable across Python + versions. 
+ """ + assert type(obj) in (int, float, str, bytes, slice, complex, bool), repr(obj) + compiled_value = create_from_access_path( + inference_state, + inference_state.compiled_subprocess.create_simple_object(obj) + ) + return ExactValue(compiled_value) + + +def get_string_value_set(inference_state): + return builtin_from_name(inference_state, 'str').execute_with_values() + + +def load_module(inference_state, dotted_name, **kwargs): + # Temporary, some tensorflow builtins cannot be loaded, so it's tried again + # and again and it's really slow. + if dotted_name.startswith('tensorflow.'): + return None + access_path = inference_state.compiled_subprocess.load_module(dotted_name=dotted_name, **kwargs) + if access_path is None: + return None + return create_from_access_path(inference_state, access_path) diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/compiled/access.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/compiled/access.py new file mode 100644 index 000000000..91f2da081 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/compiled/access.py @@ -0,0 +1,558 @@ +import inspect +import types +import traceback +import sys +import operator as op +from collections import namedtuple +import warnings +import re +import builtins +import typing +from pathlib import Path +from typing import Optional + +from jedi.inference.compiled.getattr_static import getattr_static + +ALLOWED_GETITEM_TYPES = (str, list, tuple, bytes, bytearray, dict) + +MethodDescriptorType = type(str.replace) +# These are not considered classes and access is granted even though they have +# a __class__ attribute. 
+NOT_CLASS_TYPES = ( + types.BuiltinFunctionType, + types.CodeType, + types.FrameType, + types.FunctionType, + types.GeneratorType, + types.GetSetDescriptorType, + types.LambdaType, + types.MemberDescriptorType, + types.MethodType, + types.ModuleType, + types.TracebackType, + MethodDescriptorType, + types.MappingProxyType, + types.SimpleNamespace, + types.DynamicClassAttribute, +) + +# Those types don't exist in typing. +MethodDescriptorType = type(str.replace) +WrapperDescriptorType = type(set.__iter__) +# `object.__subclasshook__` is an already executed descriptor. +object_class_dict = type.__dict__["__dict__"].__get__(object) +ClassMethodDescriptorType = type(object_class_dict['__subclasshook__']) + +_sentinel = object() + +# Maps Python syntax to the operator module. +COMPARISON_OPERATORS = { + '==': op.eq, + '!=': op.ne, + 'is': op.is_, + 'is not': op.is_not, + '<': op.lt, + '<=': op.le, + '>': op.gt, + '>=': op.ge, +} + +_OPERATORS = { + '+': op.add, + '-': op.sub, +} +_OPERATORS.update(COMPARISON_OPERATORS) + +ALLOWED_DESCRIPTOR_ACCESS = ( + types.FunctionType, + types.GetSetDescriptorType, + types.MemberDescriptorType, + MethodDescriptorType, + WrapperDescriptorType, + ClassMethodDescriptorType, + staticmethod, + classmethod, +) + + +def safe_getattr(obj, name, default=_sentinel): + try: + attr, is_get_descriptor = getattr_static(obj, name) + except AttributeError: + if default is _sentinel: + raise + return default + else: + if isinstance(attr, ALLOWED_DESCRIPTOR_ACCESS): + # In case of descriptors that have get methods we cannot return + # it's value, because that would mean code execution. + # Since it's an isinstance call, code execution is still possible, + # but this is not really a security feature, but much more of a + # safety feature. Code execution is basically always possible when + # a module is imported. This is here so people don't shoot + # themselves in the foot. 
+ return getattr(obj, name) + return attr + + +SignatureParam = namedtuple( + 'SignatureParam', + 'name has_default default default_string has_annotation annotation annotation_string kind_name' +) + + +def shorten_repr(func): + def wrapper(self): + r = func(self) + if len(r) > 50: + r = r[:50] + '..' + return r + return wrapper + + +def create_access(inference_state, obj): + return inference_state.compiled_subprocess.get_or_create_access_handle(obj) + + +def load_module(inference_state, dotted_name, sys_path): + temp, sys.path = sys.path, sys_path + try: + __import__(dotted_name) + except ImportError: + # If a module is "corrupt" or not really a Python module or whatever. + warnings.warn( + "Module %s not importable in path %s." % (dotted_name, sys_path), + UserWarning, + stacklevel=2, + ) + return None + except Exception: + # Since __import__ pretty much makes code execution possible, just + # catch any error here and print it. + warnings.warn( + "Cannot import:\n%s" % traceback.format_exc(), UserWarning, stacklevel=2 + ) + return None + finally: + sys.path = temp + + # Just access the cache after import, because of #59 as well as the very + # complicated import structure of Python. + module = sys.modules[dotted_name] + return create_access_path(inference_state, module) + + +class AccessPath: + def __init__(self, accesses): + self.accesses = accesses + + +def create_access_path(inference_state, obj): + access = create_access(inference_state, obj) + return AccessPath(access.get_access_path_tuples()) + + +def get_api_type(obj): + if inspect.isclass(obj): + return 'class' + elif inspect.ismodule(obj): + return 'module' + elif inspect.isbuiltin(obj) or inspect.ismethod(obj) \ + or inspect.ismethoddescriptor(obj) or inspect.isfunction(obj): + return 'function' + # Everything else... 
+ return 'instance' + + +class DirectObjectAccess: + def __init__(self, inference_state, obj): + self._inference_state = inference_state + self._obj = obj + + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, self.get_repr()) + + def _create_access(self, obj): + return create_access(self._inference_state, obj) + + def _create_access_path(self, obj): + return create_access_path(self._inference_state, obj) + + def py__bool__(self): + return bool(self._obj) + + def py__file__(self) -> Optional[Path]: + try: + return Path(self._obj.__file__) + except AttributeError: + return None + + def py__doc__(self): + return inspect.getdoc(self._obj) or '' + + def py__name__(self): + if not _is_class_instance(self._obj) or \ + inspect.ismethoddescriptor(self._obj): # slots + cls = self._obj + else: + try: + cls = self._obj.__class__ + except AttributeError: + # happens with numpy.core.umath._UFUNC_API (you get it + # automatically by doing `import numpy`. + return None + + try: + return cls.__name__ + except AttributeError: + return None + + def py__mro__accesses(self): + return tuple(self._create_access_path(cls) for cls in self._obj.__mro__[1:]) + + def py__getitem__all_values(self): + if isinstance(self._obj, dict): + return [self._create_access_path(v) for v in self._obj.values()] + if isinstance(self._obj, (list, tuple)): + return [self._create_access_path(v) for v in self._obj] + + if self.is_instance(): + cls = DirectObjectAccess(self._inference_state, self._obj.__class__) + return cls.py__getitem__all_values() + + try: + getitem = self._obj.__getitem__ + except AttributeError: + pass + else: + annotation = DirectObjectAccess(self._inference_state, getitem).get_return_annotation() + if annotation is not None: + return [annotation] + return None + + def py__simple_getitem__(self, index): + if type(self._obj) not in ALLOWED_GETITEM_TYPES: + # Get rid of side effects, we won't call custom `__getitem__`s. 
+ return None + + return self._create_access_path(self._obj[index]) + + def py__iter__list(self): + try: + iter_method = self._obj.__iter__ + except AttributeError: + return None + else: + p = DirectObjectAccess(self._inference_state, iter_method).get_return_annotation() + if p is not None: + return [p] + + if type(self._obj) not in ALLOWED_GETITEM_TYPES: + # Get rid of side effects, we won't call custom `__getitem__`s. + return [] + + lst = [] + for i, part in enumerate(self._obj): + if i > 20: + # Should not go crazy with large iterators + break + lst.append(self._create_access_path(part)) + return lst + + def py__class__(self): + return self._create_access_path(self._obj.__class__) + + def py__bases__(self): + return [self._create_access_path(base) for base in self._obj.__bases__] + + def py__path__(self): + paths = getattr(self._obj, '__path__', None) + # Avoid some weird hacks that would just fail, because they cannot be + # used by pickle. + if not isinstance(paths, list) \ + or not all(isinstance(p, str) for p in paths): + return None + return paths + + @shorten_repr + def get_repr(self): + if inspect.ismodule(self._obj): + return repr(self._obj) + # Try to avoid execution of the property. + if safe_getattr(self._obj, '__module__', default='') == 'builtins': + return repr(self._obj) + + type_ = type(self._obj) + if type_ == type: + return type.__repr__(self._obj) + + if safe_getattr(type_, '__module__', default='') == 'builtins': + # Allow direct execution of repr for builtins. 
+ return repr(self._obj) + return object.__repr__(self._obj) + + def is_class(self): + return inspect.isclass(self._obj) + + def is_function(self): + return inspect.isfunction(self._obj) or inspect.ismethod(self._obj) + + def is_module(self): + return inspect.ismodule(self._obj) + + def is_instance(self): + return _is_class_instance(self._obj) + + def ismethoddescriptor(self): + return inspect.ismethoddescriptor(self._obj) + + def get_qualified_names(self): + def try_to_get_name(obj): + return getattr(obj, '__qualname__', getattr(obj, '__name__', None)) + + if self.is_module(): + return () + name = try_to_get_name(self._obj) + if name is None: + name = try_to_get_name(type(self._obj)) + if name is None: + return () + return tuple(name.split('.')) + + def dir(self): + return dir(self._obj) + + def has_iter(self): + try: + iter(self._obj) + return True + except TypeError: + return False + + def is_allowed_getattr(self, name, safe=True): + # TODO this API is ugly. + if not safe: + # Unsafe is mostly used to check for __getattr__/__getattribute__. + # getattr_static works for properties, but the underscore methods + # are just ignored (because it's safer and avoids more code + # execution). See also GH #1378. + + # Avoid warnings, see comment in the next function. + with warnings.catch_warnings(record=True): + warnings.simplefilter("always") + try: + return hasattr(self._obj, name), False + except Exception: + # Obviously has an attribute (propably a property) that + # gets executed, so just avoid all exceptions here. + return False, False + try: + attr, is_get_descriptor = getattr_static(self._obj, name) + except AttributeError: + return False, False + else: + if is_get_descriptor and type(attr) not in ALLOWED_DESCRIPTOR_ACCESS: + # In case of descriptors that have get methods we cannot return + # it's value, because that would mean code execution. 
+ return True, True + return True, False + + def getattr_paths(self, name, default=_sentinel): + try: + # Make sure no warnings are printed here, this is autocompletion, + # warnings should not be shown. See also GH #1383. + with warnings.catch_warnings(record=True): + warnings.simplefilter("always") + return_obj = getattr(self._obj, name) + except Exception as e: + if default is _sentinel: + if isinstance(e, AttributeError): + # Happens e.g. in properties of + # PyQt4.QtGui.QStyleOptionComboBox.currentText + # -> just set it to None + raise + # Just in case anything happens, return an AttributeError. It + # should not crash. + raise AttributeError + return_obj = default + access = self._create_access(return_obj) + if inspect.ismodule(return_obj): + return [access] + + try: + module = return_obj.__module__ + except AttributeError: + pass + else: + if module is not None and isinstance(module, str): + try: + __import__(module) + # For some modules like _sqlite3, the __module__ for classes is + # different, in this case it's sqlite3. So we have to try to + # load that "original" module, because it's not loaded yet. If + # we don't do that, we don't really have a "parent" module and + # we would fall back to builtins. + except ImportError: + pass + + module = inspect.getmodule(return_obj) + if module is None: + module = inspect.getmodule(type(return_obj)) + if module is None: + module = builtins + return [self._create_access(module), access] + + def get_safe_value(self): + if type(self._obj) in (bool, bytes, float, int, str, slice) or self._obj is None: + return self._obj + raise ValueError("Object is type %s and not simple" % type(self._obj)) + + def get_api_type(self): + return get_api_type(self._obj) + + def get_array_type(self): + if isinstance(self._obj, dict): + return 'dict' + return None + + def get_key_paths(self): + def iter_partial_keys(): + # We could use list(keys()), but that might take a lot more memory. 
+ for (i, k) in enumerate(self._obj.keys()): + # Limit key listing at some point. This is artificial, but this + # way we don't get stalled because of slow completions + if i > 50: + break + yield k + + return [self._create_access_path(k) for k in iter_partial_keys()] + + def get_access_path_tuples(self): + accesses = [create_access(self._inference_state, o) for o in self._get_objects_path()] + return [(access.py__name__(), access) for access in accesses] + + def _get_objects_path(self): + def get(): + obj = self._obj + yield obj + try: + obj = obj.__objclass__ + except AttributeError: + pass + else: + yield obj + + try: + # Returns a dotted string path. + imp_plz = obj.__module__ + except AttributeError: + # Unfortunately in some cases like `int` there's no __module__ + if not inspect.ismodule(obj): + yield builtins + else: + if imp_plz is None: + # Happens for example in `(_ for _ in []).send.__module__`. + yield builtins + else: + try: + yield sys.modules[imp_plz] + except KeyError: + # __module__ can be something arbitrary that doesn't exist. 
+ yield builtins + + return list(reversed(list(get()))) + + def execute_operation(self, other_access_handle, operator): + other_access = other_access_handle.access + op = _OPERATORS[operator] + return self._create_access_path(op(self._obj, other_access._obj)) + + def get_annotation_name_and_args(self): + """ + Returns Tuple[Optional[str], Tuple[AccessPath, ...]] + """ + name = None + args = () + if safe_getattr(self._obj, '__module__', default='') == 'typing': + m = re.match(r'typing.(\w+)\[', repr(self._obj)) + if m is not None: + name = m.group(1) + + import typing + if sys.version_info >= (3, 8): + args = typing.get_args(self._obj) + else: + args = safe_getattr(self._obj, '__args__', default=None) + return name, tuple(self._create_access_path(arg) for arg in args) + + def needs_type_completions(self): + return inspect.isclass(self._obj) and self._obj != type + + def _annotation_to_str(self, annotation): + return inspect.formatannotation(annotation) + + def get_signature_params(self): + return [ + SignatureParam( + name=p.name, + has_default=p.default is not p.empty, + default=self._create_access_path(p.default), + default_string=repr(p.default), + has_annotation=p.annotation is not p.empty, + annotation=self._create_access_path(p.annotation), + annotation_string=self._annotation_to_str(p.annotation), + kind_name=str(p.kind) + ) for p in self._get_signature().parameters.values() + ] + + def _get_signature(self): + obj = self._obj + try: + return inspect.signature(obj) + except (RuntimeError, TypeError): + # Reading the code of the function in Python 3.6 implies there are + # at least these errors that might occur if something is wrong with + # the signature. In that case we just want a simple escape for now. 
+ raise ValueError + + def get_return_annotation(self): + try: + o = self._obj.__annotations__.get('return') + except AttributeError: + return None + + if o is None: + return None + + try: + o = typing.get_type_hints(self._obj).get('return') + except Exception: + pass + + return self._create_access_path(o) + + def negate(self): + return self._create_access_path(-self._obj) + + def get_dir_infos(self): + """ + Used to return a couple of infos that are needed when accessing the sub + objects of an objects + """ + tuples = dict( + (name, self.is_allowed_getattr(name)) + for name in self.dir() + ) + return self.needs_type_completions(), tuples + + +def _is_class_instance(obj): + """Like inspect.* methods.""" + try: + cls = obj.__class__ + except AttributeError: + return False + else: + # The isinstance check for cls is just there so issubclass doesn't + # raise an exception. + return cls != type and isinstance(cls, type) and not issubclass(cls, NOT_CLASS_TYPES) diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/compiled/getattr_static.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/compiled/getattr_static.py new file mode 100644 index 000000000..03c199eff --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/compiled/getattr_static.py @@ -0,0 +1,121 @@ +""" +A static version of getattr. +This is a backport of the Python 3 code with a little bit of additional +information returned to enable Jedi to make decisions. 
+""" + +import types + +from jedi import debug + +_sentinel = object() + + +def _check_instance(obj, attr): + instance_dict = {} + try: + instance_dict = object.__getattribute__(obj, "__dict__") + except AttributeError: + pass + return dict.get(instance_dict, attr, _sentinel) + + +def _check_class(klass, attr): + for entry in _static_getmro(klass): + if _shadowed_dict(type(entry)) is _sentinel: + try: + return entry.__dict__[attr] + except KeyError: + pass + return _sentinel + + +def _is_type(obj): + try: + _static_getmro(obj) + except TypeError: + return False + return True + + +def _shadowed_dict(klass): + dict_attr = type.__dict__["__dict__"] + for entry in _static_getmro(klass): + try: + class_dict = dict_attr.__get__(entry)["__dict__"] + except KeyError: + pass + else: + if not (type(class_dict) is types.GetSetDescriptorType + and class_dict.__name__ == "__dict__" + and class_dict.__objclass__ is entry): + return class_dict + return _sentinel + + +def _static_getmro(klass): + mro = type.__dict__['__mro__'].__get__(klass) + if not isinstance(mro, (tuple, list)): + # There are unfortunately no tests for this, I was not able to + # reproduce this in pure Python. However should still solve the issue + # raised in GH #1517. + debug.warning('mro of %s returned %s, should be a tuple' % (klass, mro)) + return () + return mro + + +def _safe_hasattr(obj, name): + return _check_class(type(obj), name) is not _sentinel + + +def _safe_is_data_descriptor(obj): + return _safe_hasattr(obj, '__set__') or _safe_hasattr(obj, '__delete__') + + +def getattr_static(obj, attr, default=_sentinel): + """Retrieve attributes without triggering dynamic lookup via the + descriptor protocol, __getattr__ or __getattribute__. + + Note: this function may not be able to retrieve all attributes + that getattr can fetch (like dynamically created attributes) + and may find attributes that getattr can't (like descriptors + that raise AttributeError). 
It can also return descriptor objects + instead of instance members in some cases. See the + documentation for details. + + Returns a tuple `(attr, is_get_descriptor)`. is_get_descripter means that + the attribute is a descriptor that has a `__get__` attribute. + """ + instance_result = _sentinel + if not _is_type(obj): + klass = type(obj) + dict_attr = _shadowed_dict(klass) + if (dict_attr is _sentinel or type(dict_attr) is types.MemberDescriptorType): + instance_result = _check_instance(obj, attr) + else: + klass = obj + + klass_result = _check_class(klass, attr) + + if instance_result is not _sentinel and klass_result is not _sentinel: + if _safe_hasattr(klass_result, '__get__') \ + and _safe_is_data_descriptor(klass_result): + # A get/set descriptor has priority over everything. + return klass_result, True + + if instance_result is not _sentinel: + return instance_result, False + if klass_result is not _sentinel: + return klass_result, _safe_hasattr(klass_result, '__get__') + + if obj is klass: + # for types we check the metaclass too + for entry in _static_getmro(type(klass)): + if _shadowed_dict(type(entry)) is _sentinel: + try: + return entry.__dict__[attr], False + except KeyError: + pass + if default is not _sentinel: + return default, False + raise AttributeError(attr) diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/compiled/mixed.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/compiled/mixed.py new file mode 100644 index 000000000..b323d70ef --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/compiled/mixed.py @@ -0,0 +1,309 @@ +""" +Used only for REPL Completion. 
+""" + +import inspect +from pathlib import Path + +from jedi.parser_utils import get_cached_code_lines + +from jedi import settings +from jedi.cache import memoize_method +from jedi.inference import compiled +from jedi.file_io import FileIO +from jedi.inference.names import NameWrapper +from jedi.inference.base_value import ValueSet, ValueWrapper, NO_VALUES +from jedi.inference.value import ModuleValue +from jedi.inference.cache import inference_state_function_cache, \ + inference_state_method_cache +from jedi.inference.compiled.access import ALLOWED_GETITEM_TYPES, get_api_type +from jedi.inference.gradual.conversion import to_stub +from jedi.inference.context import CompiledContext, CompiledModuleContext, \ + TreeContextMixin + +_sentinel = object() + + +class MixedObject(ValueWrapper): + """ + A ``MixedObject`` is used in two ways: + + 1. It uses the default logic of ``parser.python.tree`` objects, + 2. except for getattr calls and signatures. The names dicts are generated + in a fashion like ``CompiledValue``. + + This combined logic makes it possible to provide more powerful REPL + completion. It allows side effects that are not noticable with the default + parser structure to still be completeable. + + The biggest difference from CompiledValue to MixedObject is that we are + generally dealing with Python code and not with C code. This will generate + fewer special cases, because we in Python you don't have the same freedoms + to modify the runtime. + """ + def __init__(self, compiled_value, tree_value): + super().__init__(tree_value) + self.compiled_value = compiled_value + self.access_handle = compiled_value.access_handle + + def get_filters(self, *args, **kwargs): + yield MixedObjectFilter( + self.inference_state, self.compiled_value, self._wrapped_value) + + def get_signatures(self): + # Prefer `inspect.signature` over somehow analyzing Python code. It + # should be very precise, especially for stuff like `partial`. 
+ return self.compiled_value.get_signatures() + + @inference_state_method_cache(default=NO_VALUES) + def py__call__(self, arguments): + # Fallback to the wrapped value if to stub returns no values. + values = to_stub(self._wrapped_value) + if not values: + values = self._wrapped_value + return values.py__call__(arguments) + + def get_safe_value(self, default=_sentinel): + if default is _sentinel: + return self.compiled_value.get_safe_value() + else: + return self.compiled_value.get_safe_value(default) + + @property + def array_type(self): + return self.compiled_value.array_type + + def get_key_values(self): + return self.compiled_value.get_key_values() + + def py__simple_getitem__(self, index): + python_object = self.compiled_value.access_handle.access._obj + if type(python_object) in ALLOWED_GETITEM_TYPES: + return self.compiled_value.py__simple_getitem__(index) + return self._wrapped_value.py__simple_getitem__(index) + + def negate(self): + return self.compiled_value.negate() + + def _as_context(self): + if self.parent_context is None: + return MixedModuleContext(self) + return MixedContext(self) + + def __repr__(self): + return '<%s: %s; %s>' % ( + type(self).__name__, + self.access_handle.get_repr(), + self._wrapped_value, + ) + + +class MixedContext(CompiledContext, TreeContextMixin): + @property + def compiled_value(self): + return self._value.compiled_value + + +class MixedModuleContext(CompiledModuleContext, MixedContext): + pass + + +class MixedName(NameWrapper): + """ + The ``CompiledName._compiled_value`` is our MixedObject. + """ + def __init__(self, wrapped_name, parent_tree_value): + super().__init__(wrapped_name) + self._parent_tree_value = parent_tree_value + + @property + def start_pos(self): + values = list(self.infer()) + if not values: + # This means a start_pos that doesn't exist (compiled objects). 
+ return 0, 0 + return values[0].name.start_pos + + @memoize_method + def infer(self): + compiled_value = self._wrapped_name.infer_compiled_value() + tree_value = self._parent_tree_value + if tree_value.is_instance() or tree_value.is_class(): + tree_values = tree_value.py__getattribute__(self.string_name) + if compiled_value.is_function(): + return ValueSet({MixedObject(compiled_value, v) for v in tree_values}) + + module_context = tree_value.get_root_context() + return _create(self._inference_state, compiled_value, module_context) + + +class MixedObjectFilter(compiled.CompiledValueFilter): + def __init__(self, inference_state, compiled_value, tree_value): + super().__init__(inference_state, compiled_value) + self._tree_value = tree_value + + def _create_name(self, name): + return MixedName( + super()._create_name(name), + self._tree_value, + ) + + +@inference_state_function_cache() +def _load_module(inference_state, path): + return inference_state.parse( + path=path, + cache=True, + diff_cache=settings.fast_parser, + cache_path=settings.cache_directory + ).get_root_node() + + +def _get_object_to_check(python_object): + """Check if inspect.getfile has a chance to find the source.""" + try: + python_object = inspect.unwrap(python_object) + except ValueError: + # Can return a ValueError when it wraps around + pass + + if (inspect.ismodule(python_object) + or inspect.isclass(python_object) + or inspect.ismethod(python_object) + or inspect.isfunction(python_object) + or inspect.istraceback(python_object) + or inspect.isframe(python_object) + or inspect.iscode(python_object)): + return python_object + + try: + return python_object.__class__ + except AttributeError: + raise TypeError # Prevents computation of `repr` within inspect. 
+ + +def _find_syntax_node_name(inference_state, python_object): + original_object = python_object + try: + python_object = _get_object_to_check(python_object) + path = inspect.getsourcefile(python_object) + except (OSError, TypeError): + # The type might not be known (e.g. class_with_dict.__weakref__) + return None + path = None if path is None else Path(path) + try: + if path is None or not path.exists(): + # The path might not exist or be e.g. . + return None + except OSError: + # Might raise an OSError on Windows: + # + # [WinError 123] The filename, directory name, or volume label + # syntax is incorrect: '' + return None + + file_io = FileIO(path) + module_node = _load_module(inference_state, path) + + if inspect.ismodule(python_object): + # We don't need to check names for modules, because there's not really + # a way to write a module in a module in Python (and also __name__ can + # be something like ``email.utils``). + code_lines = get_cached_code_lines(inference_state.grammar, path) + return module_node, module_node, file_io, code_lines + + try: + name_str = python_object.__name__ + except AttributeError: + # Stuff like python_function.__code__. + return None + + if name_str == '': + return None # It's too hard to find lambdas. + + # Doesn't always work (e.g. os.stat_result) + names = module_node.get_used_names().get(name_str, []) + # Only functions and classes are relevant. If a name e.g. points to an + # import, it's probably a builtin (like collections.deque) and needs to be + # ignored. + names = [ + n for n in names + if n.parent.type in ('funcdef', 'classdef') and n.parent.name == n + ] + if not names: + return None + + try: + code = python_object.__code__ + # By using the line number of a code object we make the lookup in a + # file pretty easy. There's still a possibility of people defining + # stuff like ``a = 3; foo(a); a = 4`` on the same line, but if people + # do so we just don't care. 
+ line_nr = code.co_firstlineno + except AttributeError: + pass + else: + line_names = [name for name in names if name.start_pos[0] == line_nr] + # There's a chance that the object is not available anymore, because + # the code has changed in the background. + if line_names: + names = line_names + + code_lines = get_cached_code_lines(inference_state.grammar, path) + # It's really hard to actually get the right definition, here as a last + # resort we just return the last one. This chance might lead to odd + # completions at some points but will lead to mostly correct type + # inference, because people tend to define a public name in a module only + # once. + tree_node = names[-1].parent + if tree_node.type == 'funcdef' and get_api_type(original_object) == 'instance': + # If an instance is given and we're landing on a function (e.g. + # partial in 3.5), something is completely wrong and we should not + # return that. + return None + return module_node, tree_node, file_io, code_lines + + +@inference_state_function_cache() +def _create(inference_state, compiled_value, module_context): + # TODO accessing this is bad, but it probably doesn't matter that much, + # because we're working with interpreteters only here. + python_object = compiled_value.access_handle.access._obj + result = _find_syntax_node_name(inference_state, python_object) + if result is None: + # TODO Care about generics from stuff like `[1]` and don't return like this. + if type(python_object) in (dict, list, tuple): + return ValueSet({compiled_value}) + + tree_values = to_stub(compiled_value) + if not tree_values: + return ValueSet({compiled_value}) + else: + module_node, tree_node, file_io, code_lines = result + + if module_context is None or module_context.tree_node != module_node: + root_compiled_value = compiled_value.get_root_context().get_value() + # TODO this __name__ might be wrong. 
+ name = root_compiled_value.py__name__() + string_names = tuple(name.split('.')) + module_value = ModuleValue( + inference_state, module_node, + file_io=file_io, + string_names=string_names, + code_lines=code_lines, + is_package=root_compiled_value.is_package(), + ) + if name is not None: + inference_state.module_cache.add(string_names, ValueSet([module_value])) + module_context = module_value.as_context() + + tree_values = ValueSet({module_context.create_value(tree_node)}) + if tree_node.type == 'classdef': + if not compiled_value.is_class(): + # Is an instance, not a class. + tree_values = tree_values.execute_with_values() + + return ValueSet( + MixedObject(compiled_value, tree_value=tree_value) + for tree_value in tree_values + ) diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/compiled/subprocess/__init__.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/compiled/subprocess/__init__.py new file mode 100644 index 000000000..cd5fe74c8 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/compiled/subprocess/__init__.py @@ -0,0 +1,391 @@ +""" +Makes it possible to do the compiled analysis in a subprocess. This has two +goals: + +1. Making it safer - Segfaults and RuntimeErrors as well as stdout/stderr can + be ignored and dealt with. +2. Make it possible to handle different Python versions as well as virtualenvs. 
+""" + +import collections +import os +import sys +import queue +import subprocess +import traceback +import weakref +from functools import partial +from threading import Thread + +from jedi._compatibility import pickle_dump, pickle_load +from jedi import debug +from jedi.cache import memoize_method +from jedi.inference.compiled.subprocess import functions +from jedi.inference.compiled.access import DirectObjectAccess, AccessPath, \ + SignatureParam +from jedi.api.exceptions import InternalError + + +_MAIN_PATH = os.path.join(os.path.dirname(__file__), '__main__.py') +PICKLE_PROTOCOL = 4 + + +def _GeneralizedPopen(*args, **kwargs): + if os.name == 'nt': + try: + # Was introduced in Python 3.7. + CREATE_NO_WINDOW = subprocess.CREATE_NO_WINDOW + except AttributeError: + CREATE_NO_WINDOW = 0x08000000 + kwargs['creationflags'] = CREATE_NO_WINDOW + # The child process doesn't need file descriptors except 0, 1, 2. + # This is unix only. + kwargs['close_fds'] = 'posix' in sys.builtin_module_names + + return subprocess.Popen(*args, **kwargs) + + +def _enqueue_output(out, queue_): + for line in iter(out.readline, b''): + queue_.put(line) + + +def _add_stderr_to_debug(stderr_queue): + while True: + # Try to do some error reporting from the subprocess and print its + # stderr contents. + try: + line = stderr_queue.get_nowait() + line = line.decode('utf-8', 'replace') + debug.warning('stderr output: %s' % line.rstrip('\n')) + except queue.Empty: + break + + +def _get_function(name): + return getattr(functions, name) + + +def _cleanup_process(process, thread): + try: + process.kill() + process.wait() + except OSError: + # Raised if the process is already killed. + pass + thread.join() + for stream in [process.stdin, process.stdout, process.stderr]: + try: + stream.close() + except OSError: + # Raised if the stream is broken. 
+ pass + + +class _InferenceStateProcess: + def __init__(self, inference_state): + self._inference_state_weakref = weakref.ref(inference_state) + self._inference_state_id = id(inference_state) + self._handles = {} + + def get_or_create_access_handle(self, obj): + id_ = id(obj) + try: + return self.get_access_handle(id_) + except KeyError: + access = DirectObjectAccess(self._inference_state_weakref(), obj) + handle = AccessHandle(self, access, id_) + self.set_access_handle(handle) + return handle + + def get_access_handle(self, id_): + return self._handles[id_] + + def set_access_handle(self, handle): + self._handles[handle.id] = handle + + +class InferenceStateSameProcess(_InferenceStateProcess): + """ + Basically just an easy access to functions.py. It has the same API + as InferenceStateSubprocess and does the same thing without using a subprocess. + This is necessary for the Interpreter process. + """ + def __getattr__(self, name): + return partial(_get_function(name), self._inference_state_weakref()) + + +class InferenceStateSubprocess(_InferenceStateProcess): + def __init__(self, inference_state, compiled_subprocess): + super().__init__(inference_state) + self._used = False + self._compiled_subprocess = compiled_subprocess + + def __getattr__(self, name): + func = _get_function(name) + + def wrapper(*args, **kwargs): + self._used = True + + result = self._compiled_subprocess.run( + self._inference_state_weakref(), + func, + args=args, + kwargs=kwargs, + ) + # IMO it should be possible to create a hook in pickle.load to + # mess with the loaded objects. However it's extremely complicated + # to work around this so just do it with this call. 
+            return self._convert_access_handles(result)
+
+        return wrapper
+
+    def _convert_access_handles(self, obj):
+        if isinstance(obj, SignatureParam):
+            return SignatureParam(*self._convert_access_handles(tuple(obj)))
+        elif isinstance(obj, tuple):
+            return tuple(self._convert_access_handles(o) for o in obj)
+        elif isinstance(obj, list):
+            return [self._convert_access_handles(o) for o in obj]
+        elif isinstance(obj, AccessHandle):
+            try:
+                # Rewrite the access handle to one we're already having.
+                obj = self.get_access_handle(obj.id)
+            except KeyError:
+                obj.add_subprocess(self)
+                self.set_access_handle(obj)
+        elif isinstance(obj, AccessPath):
+            return AccessPath(self._convert_access_handles(obj.accesses))
+        return obj
+
+    def __del__(self):
+        if self._used and not self._compiled_subprocess.is_crashed:
+            self._compiled_subprocess.delete_inference_state(self._inference_state_id)
+
+
+class CompiledSubprocess:
+    is_crashed = False
+
+    def __init__(self, executable, env_vars=None):
+        self._executable = executable
+        self._env_vars = env_vars
+        self._inference_state_deletion_queue = collections.deque()
+        self._cleanup_callable = lambda: None
+
+    def __repr__(self):
+        pid = os.getpid()
+        return '<%s _executable=%r, is_crashed=%r, pid=%r>' % (
+            self.__class__.__name__,
+            self._executable,
+            self.is_crashed,
+            pid,
+        )
+
+    @memoize_method
+    def _get_process(self):
+        debug.dbg('Start environment subprocess %s', self._executable)
+        parso_path = sys.modules['parso'].__file__
+        args = (
+            self._executable,
+            _MAIN_PATH,
+            os.path.dirname(os.path.dirname(parso_path)),
+            '.'.join(str(x) for x in sys.version_info[:3]),
+        )
+        process = _GeneralizedPopen(
+            args,
+            stdin=subprocess.PIPE,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            env=self._env_vars
+        )
+        self._stderr_queue = queue.Queue()
+        self._stderr_thread = t = Thread(
+            target=_enqueue_output,
+            args=(process.stderr, self._stderr_queue)
+        )
+        t.daemon = True
+        t.start()
+        # Ensure the subprocess is properly
cleaned up when the object + # is garbage collected. + self._cleanup_callable = weakref.finalize(self, + _cleanup_process, + process, + t) + return process + + def run(self, inference_state, function, args=(), kwargs={}): + # Delete old inference_states. + while True: + try: + inference_state_id = self._inference_state_deletion_queue.pop() + except IndexError: + break + else: + self._send(inference_state_id, None) + + assert callable(function) + return self._send(id(inference_state), function, args, kwargs) + + def get_sys_path(self): + return self._send(None, functions.get_sys_path, (), {}) + + def _kill(self): + self.is_crashed = True + self._cleanup_callable() + + def _send(self, inference_state_id, function, args=(), kwargs={}): + if self.is_crashed: + raise InternalError("The subprocess %s has crashed." % self._executable) + + data = inference_state_id, function, args, kwargs + try: + pickle_dump(data, self._get_process().stdin, PICKLE_PROTOCOL) + except BrokenPipeError: + self._kill() + raise InternalError("The subprocess %s was killed. Maybe out of memory?" + % self._executable) + + try: + is_exception, traceback, result = pickle_load(self._get_process().stdout) + except EOFError as eof_error: + try: + stderr = self._get_process().stderr.read().decode('utf-8', 'replace') + except Exception as exc: + stderr = '' % exc + self._kill() + _add_stderr_to_debug(self._stderr_queue) + raise InternalError( + "The subprocess %s has crashed (%r, stderr=%s)." % ( + self._executable, + eof_error, + stderr, + )) + + _add_stderr_to_debug(self._stderr_queue) + + if is_exception: + # Replace the attribute error message with a the traceback. It's + # way more informative. + result.args = (traceback,) + raise result + return result + + def delete_inference_state(self, inference_state_id): + """ + Currently we are not deleting inference_state instantly. They only get + deleted once the subprocess is used again. 
It would probably a better + solution to move all of this into a thread. However, the memory usage + of a single inference_state shouldn't be that high. + """ + # With an argument - the inference_state gets deleted. + self._inference_state_deletion_queue.append(inference_state_id) + + +class Listener: + def __init__(self): + self._inference_states = {} + # TODO refactor so we don't need to process anymore just handle + # controlling. + self._process = _InferenceStateProcess(Listener) + + def _get_inference_state(self, function, inference_state_id): + from jedi.inference import InferenceState + + try: + inference_state = self._inference_states[inference_state_id] + except KeyError: + from jedi import InterpreterEnvironment + inference_state = InferenceState( + # The project is not actually needed. Nothing should need to + # access it. + project=None, + environment=InterpreterEnvironment() + ) + self._inference_states[inference_state_id] = inference_state + return inference_state + + def _run(self, inference_state_id, function, args, kwargs): + if inference_state_id is None: + return function(*args, **kwargs) + elif function is None: + del self._inference_states[inference_state_id] + else: + inference_state = self._get_inference_state(function, inference_state_id) + + # Exchange all handles + args = list(args) + for i, arg in enumerate(args): + if isinstance(arg, AccessHandle): + args[i] = inference_state.compiled_subprocess.get_access_handle(arg.id) + for key, value in kwargs.items(): + if isinstance(value, AccessHandle): + kwargs[key] = inference_state.compiled_subprocess.get_access_handle(value.id) + + return function(inference_state, *args, **kwargs) + + def listen(self): + stdout = sys.stdout + # Mute stdout. Nobody should actually be able to write to it, + # because stdout is used for IPC. 
+ sys.stdout = open(os.devnull, 'w') + stdin = sys.stdin + stdout = stdout.buffer + stdin = stdin.buffer + + while True: + try: + payload = pickle_load(stdin) + except EOFError: + # It looks like the parent process closed. + # Don't make a big fuss here and just exit. + exit(0) + try: + result = False, None, self._run(*payload) + except Exception as e: + result = True, traceback.format_exc(), e + + pickle_dump(result, stdout, PICKLE_PROTOCOL) + + +class AccessHandle: + def __init__(self, subprocess, access, id_): + self.access = access + self._subprocess = subprocess + self.id = id_ + + def add_subprocess(self, subprocess): + self._subprocess = subprocess + + def __repr__(self): + try: + detail = self.access + except AttributeError: + detail = '#' + str(self.id) + return '<%s of %s>' % (self.__class__.__name__, detail) + + def __getstate__(self): + return self.id + + def __setstate__(self, state): + self.id = state + + def __getattr__(self, name): + if name in ('id', 'access') or name.startswith('_'): + raise AttributeError("Something went wrong with unpickling") + + # print('getattr', name, file=sys.stderr) + return partial(self._workaround, name) + + def _workaround(self, name, *args, **kwargs): + """ + TODO Currently we're passing slice objects around. This should not + happen. They are also the only unhashable objects that we're passing + around. 
+ """ + if args and isinstance(args[0], slice): + return self._subprocess.get_compiled_method_return(self.id, name, *args, **kwargs) + return self._cached_results(name, *args, **kwargs) + + @memoize_method + def _cached_results(self, name, *args, **kwargs): + return self._subprocess.get_compiled_method_return(self.id, name, *args, **kwargs) diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/compiled/subprocess/__main__.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/compiled/subprocess/__main__.py new file mode 100644 index 000000000..e15543348 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/compiled/subprocess/__main__.py @@ -0,0 +1,40 @@ +import os +import sys +from importlib.abc import MetaPathFinder +from importlib.machinery import PathFinder + +# Remove the first entry, because it's simply a directory entry that equals +# this directory. +del sys.path[0] + + +def _get_paths(): + # Get the path to jedi. + _d = os.path.dirname + _jedi_path = _d(_d(_d(_d(_d(__file__))))) + _parso_path = sys.argv[1] + # The paths are the directory that jedi and parso lie in. + return {'jedi': _jedi_path, 'parso': _parso_path} + + +class _ExactImporter(MetaPathFinder): + def __init__(self, path_dct): + self._path_dct = path_dct + + def find_module(self, fullname, path=None): + if path is None and fullname in self._path_dct: + p = self._path_dct[fullname] + loader = PathFinder.find_module(fullname, path=[p]) + return loader + return None + + +# Try to import jedi/parso. +sys.meta_path.insert(0, _ExactImporter(_get_paths())) +from jedi.inference.compiled import subprocess # noqa: E402 +sys.meta_path.pop(0) + +# Retrieve the pickle protocol. +host_sys_version = [int(x) for x in sys.argv[2].split('.')] +# And finally start the client. 
+subprocess.Listener().listen() diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/compiled/subprocess/functions.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/compiled/subprocess/functions.py new file mode 100644 index 000000000..5070c6643 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/compiled/subprocess/functions.py @@ -0,0 +1,255 @@ +import sys +import os +import inspect +import importlib +import warnings +from pathlib import Path +from zipfile import ZipFile +from zipimport import zipimporter, ZipImportError +from importlib.machinery import all_suffixes + +from jedi.inference.compiled import access +from jedi import debug +from jedi import parser_utils +from jedi.file_io import KnownContentFileIO, ZipFileIO + + +def get_sys_path(): + return sys.path + + +def load_module(inference_state, **kwargs): + return access.load_module(inference_state, **kwargs) + + +def get_compiled_method_return(inference_state, id, attribute, *args, **kwargs): + handle = inference_state.compiled_subprocess.get_access_handle(id) + return getattr(handle.access, attribute)(*args, **kwargs) + + +def create_simple_object(inference_state, obj): + return access.create_access_path(inference_state, obj) + + +def get_module_info(inference_state, sys_path=None, full_name=None, **kwargs): + """ + Returns Tuple[Union[NamespaceInfo, FileIO, None], Optional[bool]] + """ + if sys_path is not None: + sys.path, temp = sys_path, sys.path + try: + return _find_module(full_name=full_name, **kwargs) + except ImportError: + return None, None + finally: + if sys_path is not None: + sys.path = temp + + +def get_builtin_module_names(inference_state): + return sys.builtin_module_names + + +def _test_raise_error(inference_state, exception_type): + """ + Raise an error to simulate certain problems for unit tests. + """ + raise exception_type + + +def _test_print(inference_state, stderr=None, stdout=None): + """ + Force some prints in the subprocesses. This exists for unit tests. 
+ """ + if stderr is not None: + print(stderr, file=sys.stderr) + sys.stderr.flush() + if stdout is not None: + print(stdout) + sys.stdout.flush() + + +def _get_init_path(directory_path): + """ + The __init__ file can be searched in a directory. If found return it, else + None. + """ + for suffix in all_suffixes(): + path = os.path.join(directory_path, '__init__' + suffix) + if os.path.exists(path): + return path + return None + + +def safe_literal_eval(inference_state, value): + return parser_utils.safe_literal_eval(value) + + +def iter_module_names(*args, **kwargs): + return list(_iter_module_names(*args, **kwargs)) + + +def _iter_module_names(inference_state, paths): + # Python modules/packages + for path in paths: + try: + dir_entries = ((entry.name, entry.is_dir()) for entry in os.scandir(path)) + except OSError: + try: + zip_import_info = zipimporter(path) + # Unfortunately, there is no public way to access zipimporter's + # private _files member. We therefore have to use a + # custom function to iterate over the files. + dir_entries = _zip_list_subdirectory( + zip_import_info.archive, zip_import_info.prefix) + except ZipImportError: + # The file might not exist or reading it might lead to an error. + debug.warning("Not possible to list directory: %s", path) + continue + for name, is_dir in dir_entries: + # First Namespaces then modules/stubs + if is_dir: + # pycache is obviously not an interesting namespace. Also the + # name must be a valid identifier. + if name != '__pycache__' and name.isidentifier(): + yield name + else: + if name.endswith('.pyi'): # Stub files + modname = name[:-4] + else: + modname = inspect.getmodulename(name) + + if modname and '.' not in modname: + if modname != '__init__': + yield modname + + +def _find_module(string, path=None, full_name=None, is_global_search=True): + """ + Provides information about a module. 
+ + This function isolates the differences in importing libraries introduced with + python 3.3 on; it gets a module name and optionally a path. It will return a + tuple containin an open file for the module (if not builtin), the filename + or the name of the module if it is a builtin one and a boolean indicating + if the module is contained in a package. + """ + spec = None + loader = None + + for finder in sys.meta_path: + if is_global_search and finder != importlib.machinery.PathFinder: + p = None + else: + p = path + try: + find_spec = finder.find_spec + except AttributeError: + # These are old-school clases that still have a different API, just + # ignore those. + continue + + spec = find_spec(string, p) + if spec is not None: + loader = spec.loader + if loader is None and not spec.has_location: + # This is a namespace package. + full_name = string if not path else full_name + implicit_ns_info = ImplicitNSInfo(full_name, spec.submodule_search_locations._path) + return implicit_ns_info, True + break + + return _find_module_py33(string, path, loader) + + +def _find_module_py33(string, path=None, loader=None, full_name=None, is_global_search=True): + loader = loader or importlib.machinery.PathFinder.find_module(string, path) + + if loader is None and path is None: # Fallback to find builtins + try: + with warnings.catch_warnings(record=True): + # Mute "DeprecationWarning: Use importlib.util.find_spec() + # instead." While we should replace that in the future, it's + # probably good to wait until we deprecate Python 3.3, since + # it was added in Python 3.4 and find_loader hasn't been + # removed in 3.6. + loader = importlib.find_loader(string) + except ValueError as e: + # See #491. Importlib might raise a ValueError, to avoid this, we + # just raise an ImportError to fix the issue. 
+ raise ImportError("Originally " + repr(e)) + + if loader is None: + raise ImportError("Couldn't find a loader for {}".format(string)) + + return _from_loader(loader, string) + + +def _from_loader(loader, string): + try: + is_package_method = loader.is_package + except AttributeError: + is_package = False + else: + is_package = is_package_method(string) + try: + get_filename = loader.get_filename + except AttributeError: + return None, is_package + else: + module_path = get_filename(string) + + # To avoid unicode and read bytes, "overwrite" loader.get_source if + # possible. + try: + f = type(loader).get_source + except AttributeError: + raise ImportError("get_source was not defined on loader") + + if f is not importlib.machinery.SourceFileLoader.get_source: + # Unfortunately we are reading unicode here, not bytes. + # It seems hard to get bytes, because the zip importer + # logic just unpacks the zip file and returns a file descriptor + # that we cannot as easily access. Therefore we just read it as + # a string in the cases where get_source was overwritten. + code = loader.get_source(string) + else: + code = _get_source(loader, string) + + if code is None: + return None, is_package + if isinstance(loader, zipimporter): + return ZipFileIO(module_path, code, Path(loader.archive)), is_package + + return KnownContentFileIO(module_path, code), is_package + + +def _get_source(loader, fullname): + """ + This method is here as a replacement for SourceLoader.get_source. That + method returns unicode, but we prefer bytes. 
+ """ + path = loader.get_filename(fullname) + try: + return loader.get_data(path) + except OSError: + raise ImportError('source not available through get_data()', + name=fullname) + + +def _zip_list_subdirectory(zip_path, zip_subdir_path): + zip_file = ZipFile(zip_path) + zip_subdir_path = Path(zip_subdir_path) + zip_content_file_paths = zip_file.namelist() + for raw_file_name in zip_content_file_paths: + file_path = Path(raw_file_name) + if file_path.parent == zip_subdir_path: + file_path = file_path.relative_to(zip_subdir_path) + yield file_path.name, raw_file_name.endswith("/") + + +class ImplicitNSInfo: + """Stores information returned from an implicit namespace spec""" + def __init__(self, name, paths): + self.name = name + self.paths = paths diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/compiled/value.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/compiled/value.py new file mode 100644 index 000000000..baad4d639 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/compiled/value.py @@ -0,0 +1,612 @@ +""" +Imitate the parser representation. 
+""" +import re +from functools import partial +from inspect import Parameter +from pathlib import Path +from typing import Optional + +from jedi import debug +from jedi.inference.utils import to_list +from jedi.cache import memoize_method +from jedi.inference.filters import AbstractFilter +from jedi.inference.names import AbstractNameDefinition, ValueNameMixin, \ + ParamNameInterface +from jedi.inference.base_value import Value, ValueSet, NO_VALUES +from jedi.inference.lazy_value import LazyKnownValue +from jedi.inference.compiled.access import _sentinel +from jedi.inference.cache import inference_state_function_cache +from jedi.inference.helpers import reraise_getitem_errors +from jedi.inference.signature import BuiltinSignature +from jedi.inference.context import CompiledContext, CompiledModuleContext + + +class CheckAttribute: + """Raises :exc:`AttributeError` if the attribute X is not available.""" + def __init__(self, check_name=None): + # Remove the py in front of e.g. py__call__. + self.check_name = check_name + + def __call__(self, func): + self.func = func + if self.check_name is None: + self.check_name = func.__name__[2:] + return self + + def __get__(self, instance, owner): + if instance is None: + return self + + # This might raise an AttributeError. That's wanted. + instance.access_handle.getattr_paths(self.check_name) + return partial(self.func, instance) + + +class CompiledValue(Value): + def __init__(self, inference_state, access_handle, parent_context=None): + super().__init__(inference_state, parent_context) + self.access_handle = access_handle + + def py__call__(self, arguments): + return_annotation = self.access_handle.get_return_annotation() + if return_annotation is not None: + # TODO the return annotation may also be a string. 
+ return create_from_access_path( + self.inference_state, + return_annotation + ).execute_annotation() + + try: + self.access_handle.getattr_paths('__call__') + except AttributeError: + return super().py__call__(arguments) + else: + if self.access_handle.is_class(): + from jedi.inference.value import CompiledInstance + return ValueSet([ + CompiledInstance(self.inference_state, self.parent_context, self, arguments) + ]) + else: + return ValueSet(self._execute_function(arguments)) + + @CheckAttribute() + def py__class__(self): + return create_from_access_path(self.inference_state, self.access_handle.py__class__()) + + @CheckAttribute() + def py__mro__(self): + return (self,) + tuple( + create_from_access_path(self.inference_state, access) + for access in self.access_handle.py__mro__accesses() + ) + + @CheckAttribute() + def py__bases__(self): + return tuple( + create_from_access_path(self.inference_state, access) + for access in self.access_handle.py__bases__() + ) + + def get_qualified_names(self): + return self.access_handle.get_qualified_names() + + def py__bool__(self): + return self.access_handle.py__bool__() + + def is_class(self): + return self.access_handle.is_class() + + def is_function(self): + return self.access_handle.is_function() + + def is_module(self): + return self.access_handle.is_module() + + def is_compiled(self): + return True + + def is_stub(self): + return False + + def is_instance(self): + return self.access_handle.is_instance() + + def py__doc__(self): + return self.access_handle.py__doc__() + + @to_list + def get_param_names(self): + try: + signature_params = self.access_handle.get_signature_params() + except ValueError: # Has no signature + params_str, ret = self._parse_function_doc() + if not params_str: + tokens = [] + else: + tokens = params_str.split(',') + if self.access_handle.ismethoddescriptor(): + tokens.insert(0, 'self') + for p in tokens: + name, _, default = p.strip().partition('=') + yield UnresolvableParamName(self, name, 
default) + else: + for signature_param in signature_params: + yield SignatureParamName(self, signature_param) + + def get_signatures(self): + _, return_string = self._parse_function_doc() + return [BuiltinSignature(self, return_string)] + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.access_handle.get_repr()) + + @memoize_method + def _parse_function_doc(self): + doc = self.py__doc__() + if doc is None: + return '', '' + + return _parse_function_doc(doc) + + @property + def api_type(self): + return self.access_handle.get_api_type() + + def get_filters(self, is_instance=False, origin_scope=None): + yield self._ensure_one_filter(is_instance) + + @memoize_method + def _ensure_one_filter(self, is_instance): + return CompiledValueFilter(self.inference_state, self, is_instance) + + def py__simple_getitem__(self, index): + with reraise_getitem_errors(IndexError, KeyError, TypeError): + try: + access = self.access_handle.py__simple_getitem__(index) + except AttributeError: + return super().py__simple_getitem__(index) + if access is None: + return super().py__simple_getitem__(index) + + return ValueSet([create_from_access_path(self.inference_state, access)]) + + def py__getitem__(self, index_value_set, contextualized_node): + all_access_paths = self.access_handle.py__getitem__all_values() + if all_access_paths is None: + # This means basically that no __getitem__ has been defined on this + # object. + return super().py__getitem__(index_value_set, contextualized_node) + return ValueSet( + create_from_access_path(self.inference_state, access) + for access in all_access_paths + ) + + def py__iter__(self, contextualized_node=None): + if not self.access_handle.has_iter(): + yield from super().py__iter__(contextualized_node) + + access_path_list = self.access_handle.py__iter__list() + if access_path_list is None: + # There is no __iter__ method on this object. 
+ return + + for access in access_path_list: + yield LazyKnownValue(create_from_access_path(self.inference_state, access)) + + def py__name__(self): + return self.access_handle.py__name__() + + @property + def name(self): + name = self.py__name__() + if name is None: + name = self.access_handle.get_repr() + return CompiledValueName(self, name) + + def _execute_function(self, params): + from jedi.inference import docstrings + from jedi.inference.compiled import builtin_from_name + if self.api_type != 'function': + return + + for name in self._parse_function_doc()[1].split(): + try: + # TODO wtf is this? this is exactly the same as the thing + # below. It uses getattr as well. + self.inference_state.builtins_module.access_handle.getattr_paths(name) + except AttributeError: + continue + else: + bltn_obj = builtin_from_name(self.inference_state, name) + yield from self.inference_state.execute(bltn_obj, params) + yield from docstrings.infer_return_types(self) + + def get_safe_value(self, default=_sentinel): + try: + return self.access_handle.get_safe_value() + except ValueError: + if default == _sentinel: + raise + return default + + def execute_operation(self, other, operator): + try: + return ValueSet([create_from_access_path( + self.inference_state, + self.access_handle.execute_operation(other.access_handle, operator) + )]) + except TypeError: + return NO_VALUES + + def execute_annotation(self): + if self.access_handle.get_repr() == 'None': + # None as an annotation doesn't need to be executed. + return ValueSet([self]) + + name, args = self.access_handle.get_annotation_name_and_args() + arguments = [ + ValueSet([create_from_access_path(self.inference_state, path)]) + for path in args + ] + if name == 'Union': + return ValueSet.from_sets(arg.execute_annotation() for arg in arguments) + elif name: + # While with_generics only exists on very specific objects, we + # should probably be fine, because we control all the typing + # objects. 
+ return ValueSet([ + v.with_generics(arguments) + for v in self.inference_state.typing_module.py__getattribute__(name) + ]).execute_annotation() + return super().execute_annotation() + + def negate(self): + return create_from_access_path(self.inference_state, self.access_handle.negate()) + + def get_metaclasses(self): + return NO_VALUES + + def _as_context(self): + return CompiledContext(self) + + @property + def array_type(self): + return self.access_handle.get_array_type() + + def get_key_values(self): + return [ + create_from_access_path(self.inference_state, k) + for k in self.access_handle.get_key_paths() + ] + + def get_type_hint(self, add_class_info=True): + if self.access_handle.get_repr() in ('None', ""): + return 'None' + return None + + +class CompiledModule(CompiledValue): + file_io = None # For modules + + def _as_context(self): + return CompiledModuleContext(self) + + def py__path__(self): + return self.access_handle.py__path__() + + def is_package(self): + return self.py__path__() is not None + + @property + def string_names(self): + # For modules + name = self.py__name__() + if name is None: + return () + return tuple(name.split('.')) + + def py__file__(self) -> Optional[Path]: + return self.access_handle.py__file__() # type: ignore[no-any-return] + + +class CompiledName(AbstractNameDefinition): + def __init__(self, inference_state, parent_value, name): + self._inference_state = inference_state + self.parent_context = parent_value.as_context() + self._parent_value = parent_value + self.string_name = name + + def py__doc__(self): + return self.infer_compiled_value().py__doc__() + + def _get_qualified_names(self): + parent_qualified_names = self.parent_context.get_qualified_names() + if parent_qualified_names is None: + return None + return parent_qualified_names + (self.string_name,) + + def get_defining_qualified_value(self): + context = self.parent_context + if context.is_module() or context.is_class(): + return self.parent_context.get_value() # 
class CompiledName(AbstractNameDefinition):
    """A name that lives on a compiled (runtime-introspected) value."""

    def __init__(self, inference_state, parent_value, name):
        self._inference_state = inference_state
        self.parent_context = parent_value.as_context()
        self._parent_value = parent_value
        self.string_name = name

    def py__doc__(self):
        return self.infer_compiled_value().py__doc__()

    def _get_qualified_names(self):
        parent_names = self.parent_context.get_qualified_names()
        if parent_names is None:
            return None
        return parent_names + (self.string_name,)

    def get_defining_qualified_value(self):
        context = self.parent_context
        if context.is_module() or context.is_class():
            # Might be None.
            return self.parent_context.get_value()
        return None

    def __repr__(self):
        try:
            # __name__ is not defined all the time.
            context_name = self.parent_context.name
        except AttributeError:
            context_name = None
        return '<%s: (%s).%s>' % (
            self.__class__.__name__, context_name, self.string_name)

    @property
    def api_type(self):
        return self.infer_compiled_value().api_type

    def infer(self):
        return ValueSet([self.infer_compiled_value()])

    @memoize_method
    def infer_compiled_value(self):
        return create_from_name(
            self._inference_state, self._parent_value, self.string_name)


class SignatureParamName(ParamNameInterface, AbstractNameDefinition):
    """A parameter name backed by an introspected signature parameter."""

    def __init__(self, compiled_value, signature_param):
        self.parent_context = compiled_value.parent_context
        self._signature_param = signature_param

    @property
    def string_name(self):
        return self._signature_param.name

    def to_string(self):
        param = self._signature_param
        pieces = [self._kind_string(), self.string_name]
        if param.has_annotation:
            pieces.append(': ' + param.annotation_string)
        if param.has_default:
            pieces.append('=' + param.default_string)
        return ''.join(pieces)

    def get_kind(self):
        # kind_name is the string name of an inspect.Parameter kind,
        # e.g. "POSITIONAL_OR_KEYWORD".
        return getattr(Parameter, self._signature_param.kind_name)

    def infer(self):
        param = self._signature_param
        inference_state = self.parent_context.inference_state
        inferred = NO_VALUES
        if param.has_default:
            inferred = ValueSet(
                [create_from_access_path(inference_state, param.default)])
        if param.has_annotation:
            annotation = create_from_access_path(
                inference_state, param.annotation)
            inferred |= annotation.execute_with_values()
        return inferred


class UnresolvableParamName(ParamNameInterface, AbstractNameDefinition):
    """A parameter whose type/semantics could not be introspected."""

    def __init__(self, compiled_value, name, default):
        self.parent_context = compiled_value.parent_context
        self.string_name = name
        self._default = default

    def get_kind(self):
        return Parameter.POSITIONAL_ONLY

    def to_string(self):
        rendered = self.string_name
        if self._default:
            rendered += '=' + self._default
        return rendered

    def infer(self):
        return NO_VALUES


class CompiledValueName(ValueNameMixin, AbstractNameDefinition):
    def __init__(self, value, name):
        self.string_name = name
        self._value = value
        self.parent_context = value.parent_context


class EmptyCompiledName(AbstractNameDefinition):
    """
    Accessing some names will raise an exception. To avoid not having any
    completions, just give Jedi the option to return this object. It infers to
    nothing.
    """
    def __init__(self, inference_state, name):
        self.parent_context = inference_state.builtins_module
        self.string_name = name

    def infer(self):
        return NO_VALUES
+ """ + if self._inference_state.allow_descriptor_getattr: + pass + + has_attribute, is_descriptor = allowed_getattr_callback( + name, + safe=not self._inference_state.allow_descriptor_getattr + ) + if check_has_attribute and not has_attribute: + return [] + + if (is_descriptor or not has_attribute) \ + and not self._inference_state.allow_descriptor_getattr: + return [self._get_cached_name(name, is_empty=True)] + + if self.is_instance and not in_dir_callback(name): + return [] + return [self._get_cached_name(name)] + + @memoize_method + def _get_cached_name(self, name, is_empty=False): + if is_empty: + return EmptyCompiledName(self._inference_state, name) + else: + return self._create_name(name) + + def values(self): + from jedi.inference.compiled import builtin_from_name + names = [] + needs_type_completions, dir_infos = self.compiled_value.access_handle.get_dir_infos() + # We could use `safe=False` here as well, especially as a parameter to + # get_dir_infos. But this would lead to a lot of property executions + # that are probably not wanted. The drawback for this is that we + # have a different name for `get` and `values`. For `get` we always + # execute. + for name in dir_infos: + names += self._get( + name, + lambda name, safe: dir_infos[name], + lambda name: name in dir_infos, + ) + + # ``dir`` doesn't include the type names. 
+ if not self.is_instance and needs_type_completions: + for filter in builtin_from_name(self._inference_state, 'type').get_filters(): + names += filter.values() + return names + + def _create_name(self, name): + return CompiledName( + self._inference_state, + self.compiled_value, + name + ) + + def __repr__(self): + return "<%s: %s>" % (self.__class__.__name__, self.compiled_value) + + +docstr_defaults = { + 'floating point number': 'float', + 'character': 'str', + 'integer': 'int', + 'dictionary': 'dict', + 'string': 'str', +} + + +def _parse_function_doc(doc): + """ + Takes a function and returns the params and return value as a tuple. + This is nothing more than a docstring parser. + + TODO docstrings like utime(path, (atime, mtime)) and a(b [, b]) -> None + TODO docstrings like 'tuple of integers' + """ + # parse round parentheses: def func(a, (b,c)) + try: + count = 0 + start = doc.index('(') + for i, s in enumerate(doc[start:]): + if s == '(': + count += 1 + elif s == ')': + count -= 1 + if count == 0: + end = start + i + break + param_str = doc[start + 1:end] + except (ValueError, UnboundLocalError): + # ValueError for doc.index + # UnboundLocalError for undefined end in last line + debug.dbg('no brackets found - no param') + end = 0 + param_str = '' + else: + # remove square brackets, that show an optional param ( = None) + def change_options(m): + args = m.group(1).split(',') + for i, a in enumerate(args): + if a and '=' not in a: + args[i] += '=None' + return ','.join(args) + + while True: + param_str, changes = re.subn(r' ?\[([^\[\]]+)\]', + change_options, param_str) + if changes == 0: + break + param_str = param_str.replace('-', '_') # see: isinstance.__doc__ + + # parse return value + r = re.search('-[>-]* ', doc[end:end + 7]) + if r is None: + ret = '' + else: + index = end + r.end() + # get result type, which can contain newlines + pattern = re.compile(r'(,\n|[^\n-])+') + ret_str = pattern.match(doc, index).group(0).strip() + # New object -> 
def create_from_name(inference_state, compiled_value, name):
    """Build the compiled value reached by ``getattr(compiled_value, name)``."""
    access_paths = compiled_value.access_handle.getattr_paths(name, default=None)

    value = None
    for access_path in access_paths:
        value = create_cached_compiled_value(
            inference_state,
            access_path,
            parent_context=None if value is None else value.as_context(),
        )
    return value


def _normalize_create_args(func):
    """The cache doesn't care about keyword vs. normal args."""
    def wrapper(inference_state, obj, parent_context=None):
        return func(inference_state, obj, parent_context)
    return wrapper


def create_from_access_path(inference_state, access_path):
    """Walk an access path, chaining each value as the next one's parent context."""
    value = None
    for name, access in access_path.accesses:
        value = create_cached_compiled_value(
            inference_state,
            access,
            parent_context=None if value is None else value.as_context()
        )
    return value


@_normalize_create_args
@inference_state_function_cache()
def create_cached_compiled_value(inference_state, access_handle, parent_context):
    """Cached constructor: top-level handles become modules, the rest plain values."""
    assert not isinstance(parent_context, CompiledValue)
    cls = CompiledModule if parent_context is None else CompiledValue
    return cls(inference_state, access_handle, parent_context)
jedi.inference.base_value import NO_VALUES, ValueSet +from jedi.parser_utils import get_parent_scope +from jedi import debug +from jedi import parser_utils + + +class AbstractContext: + # Must be defined: inference_state and tree_node and parent_context as an attribute/property + + def __init__(self, inference_state): + self.inference_state = inference_state + self.predefined_names = {} + + @abstractmethod + def get_filters(self, until_position=None, origin_scope=None): + raise NotImplementedError + + def goto(self, name_or_str, position): + from jedi.inference import finder + filters = _get_global_filters_for_name( + self, name_or_str if isinstance(name_or_str, Name) else None, position, + ) + names = finder.filter_name(filters, name_or_str) + debug.dbg('context.goto %s in (%s): %s', name_or_str, self, names) + return names + + def py__getattribute__(self, name_or_str, name_context=None, position=None, + analysis_errors=True): + """ + :param position: Position of the last statement -> tuple of line, column + """ + if name_context is None: + name_context = self + names = self.goto(name_or_str, position) + + string_name = name_or_str.value if isinstance(name_or_str, Name) else name_or_str + + # This paragraph is currently needed for proper branch type inference + # (static analysis). 
+ found_predefined_types = None + if self.predefined_names and isinstance(name_or_str, Name): + node = name_or_str + while node is not None and not parser_utils.is_scope(node): + node = node.parent + if node.type in ("if_stmt", "for_stmt", "comp_for", 'sync_comp_for'): + try: + name_dict = self.predefined_names[node] + types = name_dict[string_name] + except KeyError: + continue + else: + found_predefined_types = types + break + if found_predefined_types is not None and names: + from jedi.inference import flow_analysis + check = flow_analysis.reachability_check( + context=self, + value_scope=self.tree_node, + node=name_or_str, + ) + if check is flow_analysis.UNREACHABLE: + values = NO_VALUES + else: + values = found_predefined_types + else: + values = ValueSet.from_sets(name.infer() for name in names) + + if not names and not values and analysis_errors: + if isinstance(name_or_str, Name): + from jedi.inference import analysis + message = ("NameError: name '%s' is not defined." % string_name) + analysis.add(name_context, 'name-error', name_or_str, message) + + debug.dbg('context.names_to_types: %s -> %s', names, values) + if values: + return values + return self._check_for_additional_knowledge(name_or_str, name_context, position) + + def _check_for_additional_knowledge(self, name_or_str, name_context, position): + name_context = name_context or self + # Add isinstance and other if/assert knowledge. 
+ if isinstance(name_or_str, Name) and not name_context.is_instance(): + flow_scope = name_or_str + base_nodes = [name_context.tree_node] + + if any(b.type in ('comp_for', 'sync_comp_for') for b in base_nodes): + return NO_VALUES + from jedi.inference.finder import check_flow_information + while True: + flow_scope = get_parent_scope(flow_scope, include_flows=True) + n = check_flow_information(name_context, flow_scope, + name_or_str, position) + if n is not None: + return n + if flow_scope in base_nodes: + break + return NO_VALUES + + def get_root_context(self): + parent_context = self.parent_context + if parent_context is None: + return self + return parent_context.get_root_context() + + def is_module(self): + return False + + def is_builtins_module(self): + return False + + def is_class(self): + return False + + def is_stub(self): + return False + + def is_instance(self): + return False + + def is_compiled(self): + return False + + def is_bound_method(self): + return False + + @abstractmethod + def py__name__(self): + raise NotImplementedError + + def get_value(self): + raise NotImplementedError + + @property + def name(self): + return None + + def get_qualified_names(self): + return () + + def py__doc__(self): + return '' + + @contextmanager + def predefine_names(self, flow_scope, dct): + predefined = self.predefined_names + predefined[flow_scope] = dct + try: + yield + finally: + del predefined[flow_scope] + + +class ValueContext(AbstractContext): + """ + Should be defined, otherwise the API returns empty types. 
+ """ + def __init__(self, value): + super().__init__(value.inference_state) + self._value = value + + @property + def tree_node(self): + return self._value.tree_node + + @property + def parent_context(self): + return self._value.parent_context + + def is_module(self): + return self._value.is_module() + + def is_builtins_module(self): + return self._value == self.inference_state.builtins_module + + def is_class(self): + return self._value.is_class() + + def is_stub(self): + return self._value.is_stub() + + def is_instance(self): + return self._value.is_instance() + + def is_compiled(self): + return self._value.is_compiled() + + def is_bound_method(self): + return self._value.is_bound_method() + + def py__name__(self): + return self._value.py__name__() + + @property + def name(self): + return self._value.name + + def get_qualified_names(self): + return self._value.get_qualified_names() + + def py__doc__(self): + return self._value.py__doc__() + + def get_value(self): + return self._value + + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, self._value) + + +class TreeContextMixin: + def infer_node(self, node): + from jedi.inference.syntax_tree import infer_node + return infer_node(self, node) + + def create_value(self, node): + from jedi.inference import value + + if node == self.tree_node: + assert self.is_module() + return self.get_value() + + parent_context = self.create_context(node) + + if node.type in ('funcdef', 'lambdef'): + func = value.FunctionValue.from_context(parent_context, node) + if parent_context.is_class(): + class_value = parent_context.parent_context.create_value(parent_context.tree_node) + instance = value.AnonymousInstance( + self.inference_state, parent_context.parent_context, class_value) + func = value.BoundMethod( + instance=instance, + class_context=class_value.as_context(), + function=func + ) + return func + elif node.type == 'classdef': + return value.ClassValue(self.inference_state, parent_context, node) + else: + 
raise NotImplementedError("Probably shouldn't happen: %s" % node) + + def create_context(self, node): + def from_scope_node(scope_node, is_nested=True): + if scope_node == self.tree_node: + return self + + if scope_node.type in ('funcdef', 'lambdef', 'classdef'): + return self.create_value(scope_node).as_context() + elif scope_node.type in ('comp_for', 'sync_comp_for'): + parent_context = from_scope_node(parent_scope(scope_node.parent)) + if node.start_pos >= scope_node.children[-1].start_pos: + return parent_context + return CompForContext(parent_context, scope_node) + raise Exception("There's a scope that was not managed: %s" % scope_node) + + def parent_scope(node): + while True: + node = node.parent + + if parser_utils.is_scope(node): + return node + elif node.type in ('argument', 'testlist_comp'): + if node.children[1].type in ('comp_for', 'sync_comp_for'): + return node.children[1] + elif node.type == 'dictorsetmaker': + for n in node.children[1:4]: + # In dictionaries it can be pretty much anything. 
+ if n.type in ('comp_for', 'sync_comp_for'): + return n + + scope_node = parent_scope(node) + if scope_node.type in ('funcdef', 'classdef'): + colon = scope_node.children[scope_node.children.index(':')] + if node.start_pos < colon.start_pos: + parent = node.parent + if not (parent.type == 'param' and parent.name == node): + scope_node = parent_scope(scope_node) + return from_scope_node(scope_node, is_nested=True) + + def create_name(self, tree_name): + definition = tree_name.get_definition() + if definition and definition.type == 'param' and definition.name == tree_name: + funcdef = search_ancestor(definition, 'funcdef', 'lambdef') + func = self.create_value(funcdef) + return AnonymousParamName(func, tree_name) + else: + context = self.create_context(tree_name) + return TreeNameDefinition(context, tree_name) + + +class FunctionContext(TreeContextMixin, ValueContext): + def get_filters(self, until_position=None, origin_scope=None): + yield ParserTreeFilter( + self.inference_state, + parent_context=self, + until_position=until_position, + origin_scope=origin_scope + ) + + +class ModuleContext(TreeContextMixin, ValueContext): + def py__file__(self) -> Optional[Path]: + return self._value.py__file__() # type: ignore[no-any-return] + + def get_filters(self, until_position=None, origin_scope=None): + filters = self._value.get_filters(origin_scope) + # Skip the first filter and replace it. + next(filters, None) + yield MergedFilter( + ParserTreeFilter( + parent_context=self, + until_position=until_position, + origin_scope=origin_scope + ), + self.get_global_filter(), + ) + yield from filters + + def get_global_filter(self): + return GlobalNameFilter(self) + + @property + def string_names(self): + return self._value.string_names + + @property + def code_lines(self): + return self._value.code_lines + + def get_value(self): + """ + This is the only function that converts a context back to a value. + This is necessary for stub -> python conversion and vice versa. 
class NamespaceContext(TreeContextMixin, ValueContext):
    def get_filters(self, until_position=None, origin_scope=None):
        return self._value.get_filters()

    def get_value(self):
        return self._value

    @property
    def string_names(self):
        return self._value.string_names

    def py__file__(self) -> Optional[Path]:
        return self._value.py__file__()  # type: ignore[no-any-return]


class ClassContext(TreeContextMixin, ValueContext):
    def get_filters(self, until_position=None, origin_scope=None):
        yield self.get_global_filter(until_position, origin_scope)

    def get_global_filter(self, until_position=None, origin_scope=None):
        return ParserTreeFilter(
            parent_context=self,
            until_position=until_position,
            origin_scope=origin_scope
        )


class CompForContext(TreeContextMixin, AbstractContext):
    """Context for a comprehension's ``for`` part (has its own scope)."""

    def __init__(self, parent_context, comp_for):
        super().__init__(parent_context.inference_state)
        self.tree_node = comp_for
        self.parent_context = parent_context

    def get_filters(self, until_position=None, origin_scope=None):
        yield ParserTreeFilter(self)

    def get_value(self):
        return None

    def py__name__(self):
        return ''

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, self.tree_node)


class CompiledContext(ValueContext):
    def get_filters(self, until_position=None, origin_scope=None):
        return self._value.get_filters()


class CompiledModuleContext(CompiledContext):
    code_lines = None

    def get_value(self):
        return self._value

    @property
    def string_names(self):
        return self._value.string_names

    def py__file__(self) -> Optional[Path]:
        return self._value.py__file__()  # type: ignore[no-any-return]


def _get_global_filters_for_name(context, name_or_none, position):
    # For functions and classes the defaults don't belong to the
    # function and get inferred in the value before the function. So
    # make sure to exclude the function/class name.
    if name_or_none is not None:
        ancestor = search_ancestor(name_or_none, 'funcdef', 'classdef', 'lambdef')
        lambdef = None
        # NOTE(review): comparing a parso node to the string 'lambdef' is
        # always False; this looks like it was meant to be
        # ``ancestor.type == 'lambdef'`` — confirm against upstream before
        # changing, since the branch below happens to work for lambdefs too
        # (their children[-2] is also the ':').
        if ancestor == 'lambdef':
            # For lambdas it's even more complicated since parts will
            # be inferred later.
            lambdef = ancestor
            ancestor = search_ancestor(name_or_none, 'funcdef', 'classdef')
        if ancestor is not None:
            colon = ancestor.children[-2]
            if position is not None and position < colon.start_pos:
                if lambdef is None or position < lambdef.children[-2].start_pos:
                    position = ancestor.start_pos

    return get_global_filters(context, position, name_or_none)
So + # make sure to exclude the function/class name. + if name_or_none is not None: + ancestor = search_ancestor(name_or_none, 'funcdef', 'classdef', 'lambdef') + lambdef = None + if ancestor == 'lambdef': + # For lambdas it's even more complicated since parts will + # be inferred later. + lambdef = ancestor + ancestor = search_ancestor(name_or_none, 'funcdef', 'classdef') + if ancestor is not None: + colon = ancestor.children[-2] + if position is not None and position < colon.start_pos: + if lambdef is None or position < lambdef.children[-2].start_pos: + position = ancestor.start_pos + + return get_global_filters(context, position, name_or_none) + + +def get_global_filters(context, until_position, origin_scope): + """ + Returns all filters in order of priority for name resolution. + + For global name lookups. The filters will handle name resolution + themselves, but here we gather possible filters downwards. + + >>> from jedi import Script + >>> script = Script(''' + ... x = ['a', 'b', 'c'] + ... def func(): + ... y = None + ... ''') + >>> module_node = script._module_node + >>> scope = next(module_node.iter_funcdefs()) + >>> scope + + >>> context = script._get_module_context().create_context(scope) + >>> filters = list(get_global_filters(context, (4, 0), None)) + + First we get the names from the function scope. + + >>> print(filters[0]) # doctest: +ELLIPSIS + MergedFilter(, ) + >>> sorted(str(n) for n in filters[0].values()) # doctest: +NORMALIZE_WHITESPACE + ['', + ''] + >>> filters[0]._filters[0]._until_position + (4, 0) + >>> filters[0]._filters[1]._until_position + + Then it yields the names from one level "lower". In this example, this is + the module scope (including globals). + As a side note, you can see, that the position in the filter is None on the + globals filter, because there the whole module is searched. + + >>> list(filters[1].values()) # package modules -> Also empty. 
+ [] + >>> sorted(name.string_name for name in filters[2].values()) # Module attributes + ['__doc__', '__name__', '__package__'] + + Finally, it yields the builtin filter, if `include_builtin` is + true (default). + + >>> list(filters[3].values()) # doctest: +ELLIPSIS + [...] + """ + base_context = context + from jedi.inference.value.function import BaseFunctionExecutionContext + while context is not None: + # Names in methods cannot be resolved within the class. + yield from context.get_filters( + until_position=until_position, + origin_scope=origin_scope + ) + if isinstance(context, (BaseFunctionExecutionContext, ModuleContext)): + # The position should be reset if the current scope is a function. + until_position = None + + context = context.parent_context + + b = next(base_context.inference_state.builtins_module.get_filters(), None) + assert b is not None + # Add builtins to the global scope. + yield b diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/docstring_utils.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/docstring_utils.py new file mode 100644 index 000000000..bee0d75ec --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/docstring_utils.py @@ -0,0 +1,21 @@ +from jedi.inference.value import ModuleValue +from jedi.inference.context import ModuleContext + + +class DocstringModule(ModuleValue): + def __init__(self, in_module_context, **kwargs): + super().__init__(**kwargs) + self._in_module_context = in_module_context + + def _as_context(self): + return DocstringModuleContext(self, self._in_module_context) + + +class DocstringModuleContext(ModuleContext): + def __init__(self, module_value, in_module_context): + super().__init__(module_value) + self._in_module_context = in_module_context + + def get_filters(self, origin_scope=None, until_position=None): + yield from super().get_filters(until_position=until_position) + yield from self._in_module_context.get_filters() diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/docstrings.py 
b/bundle/jedi-vim/pythonx/jedi/jedi/inference/docstrings.py new file mode 100644 index 000000000..809974f37 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/docstrings.py @@ -0,0 +1,286 @@ +""" +Docstrings are another source of information for functions and classes. +:mod:`jedi.inference.dynamic_params` tries to find all executions of functions, +while the docstring parsing is much easier. There are three different types of +docstrings that |jedi| understands: + +- `Sphinx `_ +- `Epydoc `_ +- `Numpydoc `_ + +For example, the sphinx annotation ``:type foo: str`` clearly states that the +type of ``foo`` is ``str``. + +As an addition to parameter searching, this module also provides return +annotations. +""" + +import re +import warnings + +from parso import parse, ParserSyntaxError + +from jedi import debug +from jedi.inference.cache import inference_state_method_cache +from jedi.inference.base_value import iterator_to_value_set, ValueSet, \ + NO_VALUES +from jedi.inference.lazy_value import LazyKnownValues + + +DOCSTRING_PARAM_PATTERNS = [ + r'\s*:type\s+%s:\s*([^\n]+)', # Sphinx + r'\s*:param\s+(\w+)\s+%s:[^\n]*', # Sphinx param with type + r'\s*@type\s+%s:\s*([^\n]+)', # Epydoc +] + +DOCSTRING_RETURN_PATTERNS = [ + re.compile(r'\s*:rtype:\s*([^\n]+)', re.M), # Sphinx + re.compile(r'\s*@rtype:\s*([^\n]+)', re.M), # Epydoc +] + +REST_ROLE_PATTERN = re.compile(r':[^`]+:`([^`]+)`') + + +_numpy_doc_string_cache = None + + +def _get_numpy_doc_string_cls(): + global _numpy_doc_string_cache + if isinstance(_numpy_doc_string_cache, (ImportError, SyntaxError)): + raise _numpy_doc_string_cache + from numpydoc.docscrape import NumpyDocString # type: ignore[import] + _numpy_doc_string_cache = NumpyDocString + return _numpy_doc_string_cache + + +def _search_param_in_numpydocstr(docstr, param_str): + """Search `docstr` (in numpydoc format) for type(-s) of `param_str`.""" + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + try: + # This is a 
non-public API. If it ever changes we should be + # prepared and return gracefully. + params = _get_numpy_doc_string_cls()(docstr)._parsed_data['Parameters'] + except Exception: + return [] + for p_name, p_type, p_descr in params: + if p_name == param_str: + m = re.match(r'([^,]+(,[^,]+)*?)(,[ ]*optional)?$', p_type) + if m: + p_type = m.group(1) + return list(_expand_typestr(p_type)) + return [] + + +def _search_return_in_numpydocstr(docstr): + """ + Search `docstr` (in numpydoc format) for type(-s) of function returns. + """ + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + try: + doc = _get_numpy_doc_string_cls()(docstr) + except Exception: + return + try: + # This is a non-public API. If it ever changes we should be + # prepared and return gracefully. + returns = doc._parsed_data['Returns'] + returns += doc._parsed_data['Yields'] + except Exception: + return + for r_name, r_type, r_descr in returns: + # Return names are optional and if so the type is in the name + if not r_type: + r_type = r_name + yield from _expand_typestr(r_type) + + +def _expand_typestr(type_str): + """ + Attempts to interpret the possible types in `type_str` + """ + # Check if alternative types are specified with 'or' + if re.search(r'\bor\b', type_str): + for t in type_str.split('or'): + yield t.split('of')[0].strip() + # Check if like "list of `type`" and set type to list + elif re.search(r'\bof\b', type_str): + yield type_str.split('of')[0] + # Check if type has is a set of valid literal values eg: {'C', 'F', 'A'} + elif type_str.startswith('{'): + node = parse(type_str, version='3.7').children[0] + if node.type == 'atom': + for leaf in getattr(node.children[1], "children", []): + if leaf.type == 'number': + if '.' in leaf.value: + yield 'float' + else: + yield 'int' + elif leaf.type == 'string': + if 'b' in leaf.string_prefix.lower(): + yield 'bytes' + else: + yield 'str' + # Ignore everything else. + + # Otherwise just work with what we have. 
+ else: + yield type_str + + +def _search_param_in_docstr(docstr, param_str): + """ + Search `docstr` for type(-s) of `param_str`. + + >>> _search_param_in_docstr(':type param: int', 'param') + ['int'] + >>> _search_param_in_docstr('@type param: int', 'param') + ['int'] + >>> _search_param_in_docstr( + ... ':type param: :class:`threading.Thread`', 'param') + ['threading.Thread'] + >>> bool(_search_param_in_docstr('no document', 'param')) + False + >>> _search_param_in_docstr(':param int param: some description', 'param') + ['int'] + + """ + # look at #40 to see definitions of those params + patterns = [re.compile(p % re.escape(param_str)) + for p in DOCSTRING_PARAM_PATTERNS] + for pattern in patterns: + match = pattern.search(docstr) + if match: + return [_strip_rst_role(match.group(1))] + + return _search_param_in_numpydocstr(docstr, param_str) + + +def _strip_rst_role(type_str): + """ + Strip off the part looks like a ReST role in `type_str`. + + >>> _strip_rst_role(':class:`ClassName`') # strip off :class: + 'ClassName' + >>> _strip_rst_role(':py:obj:`module.Object`') # works with domain + 'module.Object' + >>> _strip_rst_role('ClassName') # do nothing when not ReST role + 'ClassName' + + See also: + http://sphinx-doc.org/domains.html#cross-referencing-python-objects + + """ + match = REST_ROLE_PATTERN.match(type_str) + if match: + return match.group(1) + else: + return type_str + + +def _infer_for_statement_string(module_context, string): + if string is None: + return [] + + potential_imports = re.findall(r'((?:\w+\.)*\w+)\.', string) + # Try to import module part in dotted name. + # (e.g., 'threading' in 'threading.Thread'). 
def _infer_for_statement_string(module_context, string):
    """Parse a docstring type expression and infer the values it denotes."""
    if string is None:
        return []

    potential_imports = re.findall(r'((?:\w+\.)*\w+)\.', string)
    # Try to import module part in dotted name.
    # (e.g., 'threading' in 'threading.Thread').
    imports = "\n".join(f"import {p}" for p in potential_imports)
    string = f'{imports}\n{string}'

    debug.dbg('Parse docstring code %s', string, color='BLUE')
    grammar = module_context.inference_state.grammar
    try:
        module = grammar.parse(string, error_recovery=False)
    except ParserSyntaxError:
        return []
    try:
        # It's not the last item, because that's an end marker.
        stmt = module.children[-2]
    except (AttributeError, IndexError):
        return []

    if stmt.type not in ('name', 'atom', 'atom_expr'):
        return []

    # Here we basically use a fake module that also uses the filters in
    # the actual module.
    from jedi.inference.docstring_utils import DocstringModule
    m = DocstringModule(
        in_module_context=module_context,
        inference_state=module_context.inference_state,
        module_node=module,
        code_lines=[],
    )
    return list(_execute_types_in_stmt(m.as_context(), stmt))


def _execute_types_in_stmt(module_context, stmt):
    """
    Executing all types or general elements that we find in a statement. This
    doesn't include tuple, list and dict literals, because the stuff they
    contain is executed. (Used as type information).
    """
    definitions = module_context.infer_node(stmt)
    return ValueSet.from_sets(
        _execute_array_values(module_context.inference_state, definition)
        for definition in definitions
    )


def _execute_array_values(inference_state, array):
    """
    Tuples indicate that there's not just one return value, but the listed
    ones. `(str, int)` means that it returns a tuple with both types.
    """
    from jedi.inference.value.iterable import SequenceLiteralValue, FakeTuple, FakeList
    is_sequence = (isinstance(array, SequenceLiteralValue)
                   and array.array_type in ('tuple', 'list'))
    if not is_sequence:
        return array.execute_annotation()

    values = []
    for lazy_value in array.py__iter__():
        objects = ValueSet.from_sets(
            _execute_array_values(inference_state, typ)
            for typ in lazy_value.infer()
        )
        values.append(LazyKnownValues(objects))
    cls = FakeTuple if array.array_type == 'tuple' else FakeList
    return {cls(inference_state, values)}


@inference_state_method_cache()
def infer_param(function_value, param):
    """Infer types for ``param`` from the function's (and class') docstring."""
    def infer_docstring(docstring):
        return ValueSet(
            p
            for param_str in _search_param_in_docstr(docstring, param.name.value)
            for p in _infer_for_statement_string(module_context, param_str)
        )

    module_context = function_value.get_root_context()
    func = param.get_parent_function()
    if func.type == 'lambdef':
        return NO_VALUES

    types = infer_docstring(function_value.py__doc__())
    # For __init__, the class docstring often documents the parameters too.
    if function_value.is_bound_method() \
            and function_value.py__name__() == '__init__':
        types |= infer_docstring(function_value.class_context.py__doc__())

    debug.dbg('Found param types for docstring: %s', types, color='BLUE')
    return types


@inference_state_method_cache()
@iterator_to_value_set
def infer_return_types(function_value):
    """Infer return types declared in the function's docstring."""
    def search_return_in_docstr(code):
        for pattern in DOCSTRING_RETURN_PATTERNS:
            match = pattern.search(code)
            if match:
                yield _strip_rst_role(match.group(1))
        # Check for numpy style return hint.
        yield from _search_return_in_numpydocstr(code)

    for type_str in search_return_in_docstr(function_value.py__doc__()):
        yield from _infer_for_statement_string(
            function_value.get_root_context(), type_str)
b/bundle/jedi-vim/pythonx/jedi/jedi/inference/dynamic_params.py @@ -0,0 +1,224 @@ +""" +One of the really important features of |jedi| is to have an option to +understand code like this:: + + def foo(bar): + bar. # completion here + foo(1) + +There's no doubt wheter bar is an ``int`` or not, but if there's also a call +like ``foo('str')``, what would happen? Well, we'll just show both. Because +that's what a human would expect. + +It works as follows: + +- |Jedi| sees a param +- search for function calls named ``foo`` +- execute these calls and check the input. +""" + +from jedi import settings +from jedi import debug +from jedi.parser_utils import get_parent_scope +from jedi.inference.cache import inference_state_method_cache +from jedi.inference.arguments import TreeArguments +from jedi.inference.param import get_executed_param_names +from jedi.inference.helpers import is_stdlib_path +from jedi.inference.utils import to_list +from jedi.inference.value import instance +from jedi.inference.base_value import ValueSet, NO_VALUES +from jedi.inference.references import get_module_contexts_containing_name +from jedi.inference import recursion + + +MAX_PARAM_SEARCHES = 20 + + +def _avoid_recursions(func): + def wrapper(function_value, param_index): + inf = function_value.inference_state + with recursion.execution_allowed(inf, function_value.tree_node) as allowed: + # We need to catch recursions that may occur, because an + # anonymous functions can create an anonymous parameter that is + # more or less self referencing. + if allowed: + inf.dynamic_params_depth += 1 + try: + return func(function_value, param_index) + finally: + inf.dynamic_params_depth -= 1 + return NO_VALUES + return wrapper + + +@debug.increase_indent +@_avoid_recursions +def dynamic_param_lookup(function_value, param_index): + """ + A dynamic search for param values. If you try to complete a type: + + >>> def func(foo): + ... 
foo + >>> func(1) + >>> func("") + + It is not known what the type ``foo`` without analysing the whole code. You + have to look for all calls to ``func`` to find out what ``foo`` possibly + is. + """ + funcdef = function_value.tree_node + + if not settings.dynamic_params: + return NO_VALUES + + path = function_value.get_root_context().py__file__() + if path is not None and is_stdlib_path(path): + # We don't want to search for references in the stdlib. Usually people + # don't work with it (except if you are a core maintainer, sorry). + # This makes everything slower. Just disable it and run the tests, + # you will see the slowdown, especially in 3.6. + return NO_VALUES + + if funcdef.type == 'lambdef': + string_name = _get_lambda_name(funcdef) + if string_name is None: + return NO_VALUES + else: + string_name = funcdef.name.value + debug.dbg('Dynamic param search in %s.', string_name, color='MAGENTA') + + module_context = function_value.get_root_context() + arguments_list = _search_function_arguments(module_context, funcdef, string_name) + values = ValueSet.from_sets( + get_executed_param_names( + function_value, arguments + )[param_index].infer() + for arguments in arguments_list + ) + debug.dbg('Dynamic param result finished', color='MAGENTA') + return values + + +@inference_state_method_cache(default=None) +@to_list +def _search_function_arguments(module_context, funcdef, string_name): + """ + Returns a list of param names. + """ + compare_node = funcdef + if string_name == '__init__': + cls = get_parent_scope(funcdef) + if cls.type == 'classdef': + string_name = cls.name.value + compare_node = cls + + found_arguments = False + i = 0 + inference_state = module_context.inference_state + + if settings.dynamic_params_for_other_modules: + module_contexts = get_module_contexts_containing_name( + inference_state, [module_context], string_name, + # Limit the amounts of files to be opened massively. 
+ limit_reduction=5, + ) + else: + module_contexts = [module_context] + + for for_mod_context in module_contexts: + for name, trailer in _get_potential_nodes(for_mod_context, string_name): + i += 1 + + # This is a simple way to stop Jedi's dynamic param recursion + # from going wild: The deeper Jedi's in the recursion, the less + # code should be inferred. + if i * inference_state.dynamic_params_depth > MAX_PARAM_SEARCHES: + return + + random_context = for_mod_context.create_context(name) + for arguments in _check_name_for_execution( + inference_state, random_context, compare_node, name, trailer): + found_arguments = True + yield arguments + + # If there are results after processing a module, we're probably + # good to process. This is a speed optimization. + if found_arguments: + return + + +def _get_lambda_name(node): + stmt = node.parent + if stmt.type == 'expr_stmt': + first_operator = next(stmt.yield_operators(), None) + if first_operator == '=': + first = stmt.children[0] + if first.type == 'name': + return first.value + + return None + + +def _get_potential_nodes(module_value, func_string_name): + try: + names = module_value.tree_node.get_used_names()[func_string_name] + except KeyError: + return + + for name in names: + bracket = name.get_next_leaf() + trailer = bracket.parent + if trailer.type == 'trailer' and bracket == '(': + yield name, trailer + + +def _check_name_for_execution(inference_state, context, compare_node, name, trailer): + from jedi.inference.value.function import BaseFunctionExecutionContext + + def create_args(value): + arglist = trailer.children[1] + if arglist == ')': + arglist = None + args = TreeArguments(inference_state, context, arglist, trailer) + from jedi.inference.value.instance import InstanceArguments + if value.tree_node.type == 'classdef': + created_instance = instance.TreeInstance( + inference_state, + value.parent_context, + value, + args + ) + return InstanceArguments(created_instance, args) + else: + if 
value.is_bound_method(): + args = InstanceArguments(value.instance, args) + return args + + for value in inference_state.infer(context, name): + value_node = value.tree_node + if compare_node == value_node: + yield create_args(value) + elif isinstance(value.parent_context, BaseFunctionExecutionContext) \ + and compare_node.type == 'funcdef': + # Here we're trying to find decorators by checking the first + # parameter. It's not very generic though. Should find a better + # solution that also applies to nested decorators. + param_names = value.parent_context.get_param_names() + if len(param_names) != 1: + continue + values = param_names[0].infer() + if [v.tree_node for v in values] == [compare_node]: + # Found a decorator. + module_context = context.get_root_context() + execution_context = value.as_context(create_args(value)) + potential_nodes = _get_potential_nodes(module_context, param_names[0].string_name) + for name, trailer in potential_nodes: + if value_node.start_pos < name.start_pos < value_node.end_pos: + random_context = execution_context.create_context(name) + yield from _check_name_for_execution( + inference_state, + random_context, + compare_node, + name, + trailer + ) diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/filters.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/filters.py new file mode 100644 index 000000000..62782334b --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/filters.py @@ -0,0 +1,371 @@ +""" +Filters are objects that you can use to filter names in different scopes. They +are needed for name resolution. 
+""" +from abc import abstractmethod +from typing import List, MutableMapping, Type +import weakref + +from parso.tree import search_ancestor +from parso.python.tree import Name, UsedNamesMapping + +from jedi.inference import flow_analysis +from jedi.inference.base_value import ValueSet, ValueWrapper, \ + LazyValueWrapper +from jedi.parser_utils import get_cached_parent_scope, get_parso_cache_node +from jedi.inference.utils import to_list +from jedi.inference.names import TreeNameDefinition, ParamName, \ + AnonymousParamName, AbstractNameDefinition, NameWrapper + +_definition_name_cache: MutableMapping[UsedNamesMapping, List[Name]] +_definition_name_cache = weakref.WeakKeyDictionary() + + +class AbstractFilter: + _until_position = None + + def _filter(self, names): + if self._until_position is not None: + return [n for n in names if n.start_pos < self._until_position] + return names + + @abstractmethod + def get(self, name): + raise NotImplementedError + + @abstractmethod + def values(self): + raise NotImplementedError + + +class FilterWrapper: + name_wrapper_class: Type[NameWrapper] + + def __init__(self, wrapped_filter): + self._wrapped_filter = wrapped_filter + + def wrap_names(self, names): + return [self.name_wrapper_class(name) for name in names] + + def get(self, name): + return self.wrap_names(self._wrapped_filter.get(name)) + + def values(self): + return self.wrap_names(self._wrapped_filter.values()) + + +def _get_definition_names(parso_cache_node, used_names, name_key): + if parso_cache_node is None: + names = used_names.get(name_key, ()) + return tuple(name for name in names if name.is_definition(include_setitem=True)) + + try: + for_module = _definition_name_cache[parso_cache_node] + except KeyError: + for_module = _definition_name_cache[parso_cache_node] = {} + + try: + return for_module[name_key] + except KeyError: + names = used_names.get(name_key, ()) + result = for_module[name_key] = tuple( + name for name in names if 
name.is_definition(include_setitem=True) + ) + return result + + +class _AbstractUsedNamesFilter(AbstractFilter): + name_class = TreeNameDefinition + + def __init__(self, parent_context, node_context=None): + if node_context is None: + node_context = parent_context + self._node_context = node_context + self._parser_scope = node_context.tree_node + module_context = node_context.get_root_context() + # It is quite hacky that we have to use that. This is for caching + # certain things with a WeakKeyDictionary. However, parso intentionally + # uses slots (to save memory) and therefore we end up with having to + # have a weak reference to the object that caches the tree. + # + # Previously we have tried to solve this by using a weak reference onto + # used_names. However that also does not work, because it has a + # reference from the module, which itself is referenced by any node + # through parents. + path = module_context.py__file__() + if path is None: + # If the path is None, there is no guarantee that parso caches it. 
+ self._parso_cache_node = None + else: + self._parso_cache_node = get_parso_cache_node( + module_context.inference_state.latest_grammar + if module_context.is_stub() else module_context.inference_state.grammar, + path + ) + self._used_names = module_context.tree_node.get_used_names() + self.parent_context = parent_context + + def get(self, name): + return self._convert_names(self._filter( + _get_definition_names(self._parso_cache_node, self._used_names, name), + )) + + def _convert_names(self, names): + return [self.name_class(self.parent_context, name) for name in names] + + def values(self): + return self._convert_names( + name + for name_key in self._used_names + for name in self._filter( + _get_definition_names(self._parso_cache_node, self._used_names, name_key), + ) + ) + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.parent_context) + + +class ParserTreeFilter(_AbstractUsedNamesFilter): + def __init__(self, parent_context, node_context=None, until_position=None, + origin_scope=None): + """ + node_context is an option to specify a second value for use cases + like the class mro where the parent class of a new name would be the + value, but for some type inference it's important to have a local + value of the other classes. 
+ """ + super().__init__(parent_context, node_context) + self._origin_scope = origin_scope + self._until_position = until_position + + def _filter(self, names): + names = super()._filter(names) + names = [n for n in names if self._is_name_reachable(n)] + return list(self._check_flows(names)) + + def _is_name_reachable(self, name): + parent = name.parent + if parent.type == 'trailer': + return False + base_node = parent if parent.type in ('classdef', 'funcdef') else name + return get_cached_parent_scope(self._parso_cache_node, base_node) == self._parser_scope + + def _check_flows(self, names): + for name in sorted(names, key=lambda name: name.start_pos, reverse=True): + check = flow_analysis.reachability_check( + context=self._node_context, + value_scope=self._parser_scope, + node=name, + origin_scope=self._origin_scope + ) + if check is not flow_analysis.UNREACHABLE: + yield name + + if check is flow_analysis.REACHABLE: + break + + +class _FunctionExecutionFilter(ParserTreeFilter): + def __init__(self, parent_context, function_value, until_position, origin_scope): + super().__init__( + parent_context, + until_position=until_position, + origin_scope=origin_scope, + ) + self._function_value = function_value + + def _convert_param(self, param, name): + raise NotImplementedError + + @to_list + def _convert_names(self, names): + for name in names: + param = search_ancestor(name, 'param') + # Here we don't need to check if the param is a default/annotation, + # because those are not definitions and never make it to this + # point. 
+ if param: + yield self._convert_param(param, name) + else: + yield TreeNameDefinition(self.parent_context, name) + + +class FunctionExecutionFilter(_FunctionExecutionFilter): + def __init__(self, *args, arguments, **kwargs): + super().__init__(*args, **kwargs) + self._arguments = arguments + + def _convert_param(self, param, name): + return ParamName(self._function_value, name, self._arguments) + + +class AnonymousFunctionExecutionFilter(_FunctionExecutionFilter): + def _convert_param(self, param, name): + return AnonymousParamName(self._function_value, name) + + +class GlobalNameFilter(_AbstractUsedNamesFilter): + def get(self, name): + try: + names = self._used_names[name] + except KeyError: + return [] + return self._convert_names(self._filter(names)) + + @to_list + def _filter(self, names): + for name in names: + if name.parent.type == 'global_stmt': + yield name + + def values(self): + return self._convert_names( + name for name_list in self._used_names.values() + for name in self._filter(name_list) + ) + + +class DictFilter(AbstractFilter): + def __init__(self, dct): + self._dct = dct + + def get(self, name): + try: + value = self._convert(name, self._dct[name]) + except KeyError: + return [] + else: + return list(self._filter([value])) + + def values(self): + def yielder(): + for item in self._dct.items(): + try: + yield self._convert(*item) + except KeyError: + pass + return self._filter(yielder()) + + def _convert(self, name, value): + return value + + def __repr__(self): + keys = ', '.join(self._dct.keys()) + return '<%s: for {%s}>' % (self.__class__.__name__, keys) + + +class MergedFilter: + def __init__(self, *filters): + self._filters = filters + + def get(self, name): + return [n for filter in self._filters for n in filter.get(name)] + + def values(self): + return [n for filter in self._filters for n in filter.values()] + + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, ', '.join(str(f) for f in self._filters)) + + +class 
_BuiltinMappedMethod(ValueWrapper): + """``Generator.__next__`` ``dict.values`` methods and so on.""" + api_type = 'function' + + def __init__(self, value, method, builtin_func): + super().__init__(builtin_func) + self._value = value + self._method = method + + def py__call__(self, arguments): + # TODO add TypeError if params are given/or not correct. + return self._method(self._value, arguments) + + +class SpecialMethodFilter(DictFilter): + """ + A filter for methods that are defined in this module on the corresponding + classes like Generator (for __next__, etc). + """ + class SpecialMethodName(AbstractNameDefinition): + api_type = 'function' + + def __init__(self, parent_context, string_name, callable_, builtin_value): + self.parent_context = parent_context + self.string_name = string_name + self._callable = callable_ + self._builtin_value = builtin_value + + def infer(self): + for filter in self._builtin_value.get_filters(): + # We can take the first index, because on builtin methods there's + # always only going to be one name. The same is true for the + # inferred values. + for name in filter.get(self.string_name): + builtin_func = next(iter(name.infer())) + break + else: + continue + break + return ValueSet([ + _BuiltinMappedMethod(self.parent_context, self._callable, builtin_func) + ]) + + def __init__(self, value, dct, builtin_value): + super().__init__(dct) + self.value = value + self._builtin_value = builtin_value + """ + This value is what will be used to introspect the name, where as the + other value will be used to execute the function. + + We distinguish, because we have to. 
+ """ + + def _convert(self, name, value): + return self.SpecialMethodName(self.value, name, value, self._builtin_value) + + +class _OverwriteMeta(type): + def __init__(cls, name, bases, dct): + super().__init__(name, bases, dct) + + base_dct = {} + for base_cls in reversed(cls.__bases__): + try: + base_dct.update(base_cls.overwritten_methods) + except AttributeError: + pass + + for func in cls.__dict__.values(): + try: + base_dct.update(func.registered_overwritten_methods) + except AttributeError: + pass + cls.overwritten_methods = base_dct + + +class _AttributeOverwriteMixin: + def get_filters(self, *args, **kwargs): + yield SpecialMethodFilter(self, self.overwritten_methods, self._wrapped_value) + yield from self._wrapped_value.get_filters(*args, **kwargs) + + +class LazyAttributeOverwrite(_AttributeOverwriteMixin, LazyValueWrapper, + metaclass=_OverwriteMeta): + def __init__(self, inference_state): + self.inference_state = inference_state + + +class AttributeOverwrite(_AttributeOverwriteMixin, ValueWrapper, + metaclass=_OverwriteMeta): + pass + + +def publish_method(method_name): + def decorator(func): + dct = func.__dict__.setdefault('registered_overwritten_methods', {}) + dct[method_name] = func + return func + return decorator diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/finder.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/finder.py new file mode 100644 index 000000000..aac587324 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/finder.py @@ -0,0 +1,146 @@ +""" +Searching for names with given scope and name. This is very central in Jedi and +Python. The name resolution is quite complicated with descripter, +``__getattribute__``, ``__getattr__``, ``global``, etc. + +If you want to understand name resolution, please read the first few chapters +in http://blog.ionelmc.ro/2015/02/09/understanding-python-metaclasses/. + +Flow checks ++++++++++++ + +Flow checks are not really mature. There's only a check for ``isinstance``. 
It +would check whether a flow has the form of ``if isinstance(a, type_or_tuple)``. +Unfortunately every other thing is being ignored (e.g. a == '' would be easy to +check for -> a is a string). There's big potential in these checks. +""" + +from parso.tree import search_ancestor +from parso.python.tree import Name + +from jedi import settings +from jedi.inference.arguments import TreeArguments +from jedi.inference.value import iterable +from jedi.inference.base_value import NO_VALUES +from jedi.parser_utils import is_scope + + +def filter_name(filters, name_or_str): + """ + Searches names that are defined in a scope (the different + ``filters``), until a name fits. + """ + string_name = name_or_str.value if isinstance(name_or_str, Name) else name_or_str + names = [] + for filter in filters: + names = filter.get(string_name) + if names: + break + + return list(_remove_del_stmt(names)) + + +def _remove_del_stmt(names): + # Catch del statements and remove them from results. + for name in names: + if name.tree_name is not None: + definition = name.tree_name.get_definition() + if definition is not None and definition.type == 'del_stmt': + continue + yield name + + +def check_flow_information(value, flow, search_name, pos): + """ Try to find out the type of a variable just with the information that + is given by the flows: e.g. It is also responsible for assert checks.:: + + if isinstance(k, str): + k. # <- completion here + + ensures that `k` is a string. + """ + if not settings.dynamic_flow_information: + return None + + result = None + if is_scope(flow): + # Check for asserts. 
+ module_node = flow.get_root_node() + try: + names = module_node.get_used_names()[search_name.value] + except KeyError: + return None + names = reversed([ + n for n in names + if flow.start_pos <= n.start_pos < (pos or flow.end_pos) + ]) + + for name in names: + ass = search_ancestor(name, 'assert_stmt') + if ass is not None: + result = _check_isinstance_type(value, ass.assertion, search_name) + if result is not None: + return result + + if flow.type in ('if_stmt', 'while_stmt'): + potential_ifs = [c for c in flow.children[1::4] if c != ':'] + for if_test in reversed(potential_ifs): + if search_name.start_pos > if_test.end_pos: + return _check_isinstance_type(value, if_test, search_name) + return result + + +def _get_isinstance_trailer_arglist(node): + if node.type in ('power', 'atom_expr') and len(node.children) == 2: + # This might be removed if we analyze and, etc + first, trailer = node.children + if first.type == 'name' and first.value == 'isinstance' \ + and trailer.type == 'trailer' and trailer.children[0] == '(': + return trailer + return None + + +def _check_isinstance_type(value, node, search_name): + lazy_cls = None + trailer = _get_isinstance_trailer_arglist(node) + if trailer is not None and len(trailer.children) == 3: + arglist = trailer.children[1] + args = TreeArguments(value.inference_state, value, arglist, trailer) + param_list = list(args.unpack()) + # Disallow keyword arguments + if len(param_list) == 2 and len(arglist.children) == 3: + (key1, _), (key2, lazy_value_cls) = param_list + if key1 is None and key2 is None: + call = _get_call_string(search_name) + is_instance_call = _get_call_string(arglist.children[0]) + # Do a simple get_code comparison of the strings . They should + # just have the same code, and everything will be all right. + # There are ways that this is not correct, if some stuff is + # redefined in between. However here we don't care, because + # it's a heuristic that works pretty well. 
+ if call == is_instance_call: + lazy_cls = lazy_value_cls + if lazy_cls is None: + return None + + value_set = NO_VALUES + for cls_or_tup in lazy_cls.infer(): + if isinstance(cls_or_tup, iterable.Sequence) and cls_or_tup.array_type == 'tuple': + for lazy_value in cls_or_tup.py__iter__(): + value_set |= lazy_value.infer().execute_with_values() + else: + value_set |= cls_or_tup.execute_with_values() + return value_set + + +def _get_call_string(node): + if node.parent.type == 'atom_expr': + return _get_call_string(node.parent) + + code = '' + leaf = node.get_first_leaf() + end = node.get_last_leaf().end_pos + while leaf.start_pos < end: + code += leaf.value + leaf = leaf.get_next_leaf() + return code diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/flow_analysis.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/flow_analysis.py new file mode 100644 index 000000000..89bfe578d --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/flow_analysis.py @@ -0,0 +1,125 @@ +from typing import Dict, Optional + +from jedi.parser_utils import get_flow_branch_keyword, is_scope, get_parent_scope +from jedi.inference.recursion import execution_allowed +from jedi.inference.helpers import is_big_annoying_library + + +class Status: + lookup_table: Dict[Optional[bool], 'Status'] = {} + + def __init__(self, value: Optional[bool], name: str) -> None: + self._value = value + self._name = name + Status.lookup_table[value] = self + + def invert(self): + if self is REACHABLE: + return UNREACHABLE + elif self is UNREACHABLE: + return REACHABLE + else: + return UNSURE + + def __and__(self, other): + if UNSURE in (self, other): + return UNSURE + else: + return REACHABLE if self._value and other._value else UNREACHABLE + + def __repr__(self): + return '<%s: %s>' % (type(self).__name__, self._name) + + +REACHABLE = Status(True, 'reachable') +UNREACHABLE = Status(False, 'unreachable') +UNSURE = Status(None, 'unsure') + + +def _get_flow_scopes(node): + while True: + node = 
get_parent_scope(node, include_flows=True) + if node is None or is_scope(node): + return + yield node + + +def reachability_check(context, value_scope, node, origin_scope=None): + if is_big_annoying_library(context) \ + or not context.inference_state.flow_analysis_enabled: + return UNSURE + + first_flow_scope = get_parent_scope(node, include_flows=True) + if origin_scope is not None: + origin_flow_scopes = list(_get_flow_scopes(origin_scope)) + node_flow_scopes = list(_get_flow_scopes(node)) + + branch_matches = True + for flow_scope in origin_flow_scopes: + if flow_scope in node_flow_scopes: + node_keyword = get_flow_branch_keyword(flow_scope, node) + origin_keyword = get_flow_branch_keyword(flow_scope, origin_scope) + branch_matches = node_keyword == origin_keyword + if flow_scope.type == 'if_stmt': + if not branch_matches: + return UNREACHABLE + elif flow_scope.type == 'try_stmt': + if not branch_matches and origin_keyword == 'else' \ + and node_keyword == 'except': + return UNREACHABLE + if branch_matches: + break + + # Direct parents get resolved, we filter scopes that are separate + # branches. This makes sense for autocompletion and static analysis. + # For actual Python it doesn't matter, because we're talking about + # potentially unreachable code. + # e.g. `if 0:` would cause all name lookup within the flow make + # unaccessible. This is not a "problem" in Python, because the code is + # never called. In Jedi though, we still want to infer types. 
+ while origin_scope is not None: + if first_flow_scope == origin_scope and branch_matches: + return REACHABLE + origin_scope = origin_scope.parent + + return _break_check(context, value_scope, first_flow_scope, node) + + +def _break_check(context, value_scope, flow_scope, node): + reachable = REACHABLE + if flow_scope.type == 'if_stmt': + if flow_scope.is_node_after_else(node): + for check_node in flow_scope.get_test_nodes(): + reachable = _check_if(context, check_node) + if reachable in (REACHABLE, UNSURE): + break + reachable = reachable.invert() + else: + flow_node = flow_scope.get_corresponding_test_node(node) + if flow_node is not None: + reachable = _check_if(context, flow_node) + elif flow_scope.type in ('try_stmt', 'while_stmt'): + return UNSURE + + # Only reachable branches need to be examined further. + if reachable in (UNREACHABLE, UNSURE): + return reachable + + if value_scope != flow_scope and value_scope != flow_scope.parent: + flow_scope = get_parent_scope(flow_scope, include_flows=True) + return reachable & _break_check(context, value_scope, flow_scope, node) + else: + return reachable + + +def _check_if(context, node): + with execution_allowed(context.inference_state, node) as allowed: + if not allowed: + return UNSURE + + types = context.infer_node(node) + values = set(x.py__bool__() for x in types) + if len(values) == 1: + return Status.lookup_table[values.pop()] + else: + return UNSURE diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/__init__.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/__init__.py new file mode 100644 index 000000000..5c86b7b34 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/__init__.py @@ -0,0 +1,4 @@ +""" +It is unfortunately not well documented how stubs and annotations work in Jedi. +If somebody needs an introduction, please let me know. 
+""" diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/annotation.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/annotation.py new file mode 100644 index 000000000..81780010f --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/annotation.py @@ -0,0 +1,470 @@ +""" +PEP 0484 ( https://www.python.org/dev/peps/pep-0484/ ) describes type hints +through function annotations. There is a strong suggestion in this document +that only the type of type hinting defined in PEP0484 should be allowed +as annotations in future python versions. +""" + +import re +from inspect import Parameter + +from parso import ParserSyntaxError, parse + +from jedi.inference.cache import inference_state_method_cache +from jedi.inference.base_value import ValueSet, NO_VALUES +from jedi.inference.gradual.base import DefineGenericBaseClass, GenericClass +from jedi.inference.gradual.generics import TupleGenericManager +from jedi.inference.gradual.type_var import TypeVar +from jedi.inference.helpers import is_string +from jedi.inference.compiled import builtin_from_name +from jedi.inference.param import get_executed_param_names +from jedi import debug +from jedi import parser_utils + + +def infer_annotation(context, annotation): + """ + Inferes an annotation node. 
This means that it inferes the part of + `int` here: + + foo: int = 3 + + Also checks for forward references (strings) + """ + value_set = context.infer_node(annotation) + if len(value_set) != 1: + debug.warning("Inferred typing index %s should lead to 1 object, " + " not %s" % (annotation, value_set)) + return value_set + + inferred_value = list(value_set)[0] + if is_string(inferred_value): + result = _get_forward_reference_node(context, inferred_value.get_safe_value()) + if result is not None: + return context.infer_node(result) + return value_set + + +def _infer_annotation_string(context, string, index=None): + node = _get_forward_reference_node(context, string) + if node is None: + return NO_VALUES + + value_set = context.infer_node(node) + if index is not None: + value_set = value_set.filter( + lambda value: ( + value.array_type == 'tuple' + and len(list(value.py__iter__())) >= index + ) + ).py__simple_getitem__(index) + return value_set + + +def _get_forward_reference_node(context, string): + try: + new_node = context.inference_state.grammar.parse( + string, + start_symbol='eval_input', + error_recovery=False + ) + except ParserSyntaxError: + debug.warning('Annotation not parsed: %s' % string) + return None + else: + module = context.tree_node.get_root_node() + parser_utils.move(new_node, module.end_pos[0]) + new_node.parent = context.tree_node + return new_node + + +def _split_comment_param_declaration(decl_text): + """ + Split decl_text on commas, but group generic expressions + together. + + For example, given "foo, Bar[baz, biz]" we return + ['foo', 'Bar[baz, biz]']. 
+ + """ + try: + node = parse(decl_text, error_recovery=False).children[0] + except ParserSyntaxError: + debug.warning('Comment annotation is not valid Python: %s' % decl_text) + return [] + + if node.type in ['name', 'atom_expr', 'power']: + return [node.get_code().strip()] + + params = [] + try: + children = node.children + except AttributeError: + return [] + else: + for child in children: + if child.type in ['name', 'atom_expr', 'power']: + params.append(child.get_code().strip()) + + return params + + +@inference_state_method_cache() +def infer_param(function_value, param, ignore_stars=False): + values = _infer_param(function_value, param) + if ignore_stars or not values: + return values + inference_state = function_value.inference_state + if param.star_count == 1: + tuple_ = builtin_from_name(inference_state, 'tuple') + return ValueSet([GenericClass( + tuple_, + TupleGenericManager((values,)), + )]) + elif param.star_count == 2: + dct = builtin_from_name(inference_state, 'dict') + generics = ( + ValueSet([builtin_from_name(inference_state, 'str')]), + values + ) + return ValueSet([GenericClass( + dct, + TupleGenericManager(generics), + )]) + return values + + +def _infer_param(function_value, param): + """ + Infers the type of a function parameter, using type annotations. + """ + annotation = param.annotation + if annotation is None: + # If no Python 3-style annotation, look for a comment annotation. + # Identify parameters to function in the same sequence as they would + # appear in a type comment. 
+ all_params = [child for child in param.parent.children + if child.type == 'param'] + + node = param.parent.parent + comment = parser_utils.get_following_comment_same_line(node) + if comment is None: + return NO_VALUES + + match = re.match(r"^#\s*type:\s*\(([^#]*)\)\s*->", comment) + if not match: + return NO_VALUES + params_comments = _split_comment_param_declaration(match.group(1)) + + # Find the specific param being investigated + index = all_params.index(param) + # If the number of parameters doesn't match length of type comment, + # ignore first parameter (assume it's self). + if len(params_comments) != len(all_params): + debug.warning( + "Comments length != Params length %s %s", + params_comments, all_params + ) + if function_value.is_bound_method(): + if index == 0: + # Assume it's self, which is already handled + return NO_VALUES + index -= 1 + if index >= len(params_comments): + return NO_VALUES + + param_comment = params_comments[index] + return _infer_annotation_string( + function_value.get_default_param_context(), + param_comment + ) + # Annotations are like default params and resolve in the same way. 
+ context = function_value.get_default_param_context() + return infer_annotation(context, annotation) + + +def py__annotations__(funcdef): + dct = {} + for function_param in funcdef.get_params(): + param_annotation = function_param.annotation + if param_annotation is not None: + dct[function_param.name.value] = param_annotation + + return_annotation = funcdef.annotation + if return_annotation: + dct['return'] = return_annotation + return dct + + +def resolve_forward_references(context, all_annotations): + def resolve(node): + if node is None or node.type != 'string': + return node + + node = _get_forward_reference_node( + context, + context.inference_state.compiled_subprocess.safe_literal_eval( + node.value, + ), + ) + + if node is None: + # There was a string, but it's not a valid annotation + return None + + # The forward reference tree has an additional root node ('eval_input') + # that we don't want. Extract the node we do want, that is equivalent to + # the nodes returned by `py__annotations__` for a non-quoted node. + node = node.children[0] + + return node + + return {name: resolve(node) for name, node in all_annotations.items()} + + +@inference_state_method_cache() +def infer_return_types(function, arguments): + """ + Infers the type of a function's return value, + according to type annotations. + """ + context = function.get_default_param_context() + all_annotations = resolve_forward_references( + context, + py__annotations__(function.tree_node), + ) + annotation = all_annotations.get("return", None) + if annotation is None: + # If there is no Python 3-type annotation, look for an annotation + # comment. 
+ node = function.tree_node + comment = parser_utils.get_following_comment_same_line(node) + if comment is None: + return NO_VALUES + + match = re.match(r"^#\s*type:\s*\([^#]*\)\s*->\s*([^#]*)", comment) + if not match: + return NO_VALUES + + return _infer_annotation_string( + context, + match.group(1).strip() + ).execute_annotation() + + unknown_type_vars = find_unknown_type_vars(context, annotation) + annotation_values = infer_annotation(context, annotation) + if not unknown_type_vars: + return annotation_values.execute_annotation() + + type_var_dict = infer_type_vars_for_execution(function, arguments, all_annotations) + + return ValueSet.from_sets( + ann.define_generics(type_var_dict) + if isinstance(ann, (DefineGenericBaseClass, TypeVar)) else ValueSet({ann}) + for ann in annotation_values + ).execute_annotation() + + +def infer_type_vars_for_execution(function, arguments, annotation_dict): + """ + Some functions use type vars that are not defined by the class, but rather + only defined in the function. See for example `iter`. In those cases we + want to: + + 1. Search for undefined type vars. + 2. Infer type vars with the execution state we have. + 3. Return the union of all type vars that have been found. 
+ """ + context = function.get_default_param_context() + + annotation_variable_results = {} + executed_param_names = get_executed_param_names(function, arguments) + for executed_param_name in executed_param_names: + try: + annotation_node = annotation_dict[executed_param_name.string_name] + except KeyError: + continue + + annotation_variables = find_unknown_type_vars(context, annotation_node) + if annotation_variables: + # Infer unknown type var + annotation_value_set = context.infer_node(annotation_node) + kind = executed_param_name.get_kind() + actual_value_set = executed_param_name.infer() + if kind is Parameter.VAR_POSITIONAL: + actual_value_set = actual_value_set.merge_types_of_iterate() + elif kind is Parameter.VAR_KEYWORD: + # TODO _dict_values is not public. + actual_value_set = actual_value_set.try_merge('_dict_values') + merge_type_var_dicts( + annotation_variable_results, + annotation_value_set.infer_type_vars(actual_value_set), + ) + return annotation_variable_results + + +def infer_return_for_callable(arguments, param_values, result_values): + all_type_vars = {} + for pv in param_values: + if pv.array_type == 'list': + type_var_dict = _infer_type_vars_for_callable(arguments, pv.py__iter__()) + all_type_vars.update(type_var_dict) + + return ValueSet.from_sets( + v.define_generics(all_type_vars) + if isinstance(v, (DefineGenericBaseClass, TypeVar)) + else ValueSet({v}) + for v in result_values + ).execute_annotation() + + +def _infer_type_vars_for_callable(arguments, lazy_params): + """ + Infers type vars for the Calllable class: + + def x() -> Callable[[Callable[..., _T]], _T]: ... 
+ """ + annotation_variable_results = {} + for (_, lazy_value), lazy_callable_param in zip(arguments.unpack(), lazy_params): + callable_param_values = lazy_callable_param.infer() + # Infer unknown type var + actual_value_set = lazy_value.infer() + merge_type_var_dicts( + annotation_variable_results, + callable_param_values.infer_type_vars(actual_value_set), + ) + return annotation_variable_results + + +def merge_type_var_dicts(base_dict, new_dict): + for type_var_name, values in new_dict.items(): + if values: + try: + base_dict[type_var_name] |= values + except KeyError: + base_dict[type_var_name] = values + + +def merge_pairwise_generics(annotation_value, annotated_argument_class): + """ + Match up the generic parameters from the given argument class to the + target annotation. + + This walks the generic parameters immediately within the annotation and + argument's type, in order to determine the concrete values of the + annotation's parameters for the current case. + + For example, given the following code: + + def values(mapping: Mapping[K, V]) -> List[V]: ... + + for val in values({1: 'a'}): + val + + Then this function should be given representations of `Mapping[K, V]` + and `Mapping[int, str]`, so that it can determine that `K` is `int and + `V` is `str`. + + Note that it is responsibility of the caller to traverse the MRO of the + argument type as needed in order to find the type matching the + annotation (in this case finding `Mapping[int, str]` as a parent of + `Dict[int, str]`). + + Parameters + ---------- + + `annotation_value`: represents the annotation to infer the concrete + parameter types of. + + `annotated_argument_class`: represents the annotated class of the + argument being passed to the object annotated by `annotation_value`. 
+ """ + + type_var_dict = {} + + if not isinstance(annotated_argument_class, DefineGenericBaseClass): + return type_var_dict + + annotation_generics = annotation_value.get_generics() + actual_generics = annotated_argument_class.get_generics() + + for annotation_generics_set, actual_generic_set in zip(annotation_generics, actual_generics): + merge_type_var_dicts( + type_var_dict, + annotation_generics_set.infer_type_vars(actual_generic_set.execute_annotation()), + ) + + return type_var_dict + + +def find_type_from_comment_hint_for(context, node, name): + return _find_type_from_comment_hint(context, node, node.children[1], name) + + +def find_type_from_comment_hint_with(context, node, name): + assert len(node.children[1].children) == 3, \ + "Can only be here when children[1] is 'foo() as f'" + varlist = node.children[1].children[2] + return _find_type_from_comment_hint(context, node, varlist, name) + + +def find_type_from_comment_hint_assign(context, node, name): + return _find_type_from_comment_hint(context, node, node.children[0], name) + + +def _find_type_from_comment_hint(context, node, varlist, name): + index = None + if varlist.type in ("testlist_star_expr", "exprlist", "testlist"): + # something like "a, b = 1, 2" + index = 0 + for child in varlist.children: + if child == name: + break + if child.type == "operator": + continue + index += 1 + else: + return [] + + comment = parser_utils.get_following_comment_same_line(node) + if comment is None: + return [] + match = re.match(r"^#\s*type:\s*([^#]*)", comment) + if match is None: + return [] + return _infer_annotation_string( + context, match.group(1).strip(), index + ).execute_annotation() + + +def find_unknown_type_vars(context, node): + def check_node(node): + if node.type in ('atom_expr', 'power'): + trailer = node.children[-1] + if trailer.type == 'trailer' and trailer.children[0] == '[': + for subscript_node in _unpack_subscriptlist(trailer.children[1]): + check_node(subscript_node) + else: + found[:] = 
_filter_type_vars(context.infer_node(node), found) + + found = [] # We're not using a set, because the order matters. + check_node(node) + return found + + +def _filter_type_vars(value_set, found=()): + new_found = list(found) + for type_var in value_set: + if isinstance(type_var, TypeVar) and type_var not in found: + new_found.append(type_var) + return new_found + + +def _unpack_subscriptlist(subscriptlist): + if subscriptlist.type == 'subscriptlist': + for subscript in subscriptlist.children[::2]: + if subscript.type != 'subscript': + yield subscript + else: + if subscriptlist.type != 'subscript': + yield subscriptlist diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/base.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/base.py new file mode 100644 index 000000000..ce574297e --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/base.py @@ -0,0 +1,434 @@ +from jedi.inference.cache import inference_state_method_cache +from jedi.inference.base_value import ValueSet, NO_VALUES, Value, \ + iterator_to_value_set, LazyValueWrapper, ValueWrapper +from jedi.inference.compiled import builtin_from_name +from jedi.inference.value.klass import ClassFilter +from jedi.inference.value.klass import ClassMixin +from jedi.inference.utils import to_list +from jedi.inference.names import AbstractNameDefinition, ValueName +from jedi.inference.context import ClassContext +from jedi.inference.gradual.generics import TupleGenericManager + + +class _BoundTypeVarName(AbstractNameDefinition): + """ + This type var was bound to a certain type, e.g. int. + """ + def __init__(self, type_var, value_set): + self._type_var = type_var + self.parent_context = type_var.parent_context + self._value_set = value_set + + def infer(self): + def iter_(): + for value in self._value_set: + # Replace any with the constraints if they are there. 
+ from jedi.inference.gradual.typing import AnyClass + if isinstance(value, AnyClass): + yield from self._type_var.constraints + else: + yield value + return ValueSet(iter_()) + + def py__name__(self): + return self._type_var.py__name__() + + def __repr__(self): + return '<%s %s -> %s>' % (self.__class__.__name__, self.py__name__(), self._value_set) + + +class _TypeVarFilter: + """ + A filter for all given variables in a class. + + A = TypeVar('A') + B = TypeVar('B') + class Foo(Mapping[A, B]): + ... + + In this example we would have two type vars given: A and B + """ + def __init__(self, generics, type_vars): + self._generics = generics + self._type_vars = type_vars + + def get(self, name): + for i, type_var in enumerate(self._type_vars): + if type_var.py__name__() == name: + try: + return [_BoundTypeVarName(type_var, self._generics[i])] + except IndexError: + return [type_var.name] + return [] + + def values(self): + # The values are not relevant. If it's not searched exactly, the type + # vars are just global and should be looked up as that. + return [] + + +class _AnnotatedClassContext(ClassContext): + def get_filters(self, *args, **kwargs): + filters = super().get_filters( + *args, **kwargs + ) + yield from filters + + # The type vars can only be looked up if it's a global search and + # not a direct lookup on the class. 
+ yield self._value.get_type_var_filter() + + +class DefineGenericBaseClass(LazyValueWrapper): + def __init__(self, generics_manager): + self._generics_manager = generics_manager + + def _create_instance_with_generics(self, generics_manager): + raise NotImplementedError + + @inference_state_method_cache() + def get_generics(self): + return self._generics_manager.to_tuple() + + def define_generics(self, type_var_dict): + from jedi.inference.gradual.type_var import TypeVar + changed = False + new_generics = [] + for generic_set in self.get_generics(): + values = NO_VALUES + for generic in generic_set: + if isinstance(generic, (DefineGenericBaseClass, TypeVar)): + result = generic.define_generics(type_var_dict) + values |= result + if result != ValueSet({generic}): + changed = True + else: + values |= ValueSet([generic]) + new_generics.append(values) + + if not changed: + # There might not be any type vars that change. In that case just + # return itself, because it does not make sense to potentially lose + # cached results. + return ValueSet([self]) + + return ValueSet([self._create_instance_with_generics( + TupleGenericManager(tuple(new_generics)) + )]) + + def is_same_class(self, other): + if not isinstance(other, DefineGenericBaseClass): + return False + + if self.tree_node != other.tree_node: + # TODO not sure if this is nice. + return False + given_params1 = self.get_generics() + given_params2 = other.get_generics() + + if len(given_params1) != len(given_params2): + # If the amount of type vars doesn't match, the class doesn't + # match. + return False + + # Now compare generics + return all( + any( + # TODO why is this ordering the correct one? + cls2.is_same_class(cls1) + # TODO I'm still not sure gather_annotation_classes is a good + # idea. They are essentially here to avoid comparing Tuple <=> + # tuple and instead compare tuple <=> tuple, but at the moment + # the whole `is_same_class` and `is_sub_class` matching is just + # not in the best shape. 
+ for cls1 in class_set1.gather_annotation_classes() + for cls2 in class_set2.gather_annotation_classes() + ) for class_set1, class_set2 in zip(given_params1, given_params2) + ) + + def get_signatures(self): + return [] + + def __repr__(self): + return '<%s: %s%s>' % ( + self.__class__.__name__, + self._wrapped_value, + list(self.get_generics()), + ) + + +class GenericClass(DefineGenericBaseClass, ClassMixin): + """ + A class that is defined with generics, might be something simple like: + + class Foo(Generic[T]): ... + my_foo_int_cls = Foo[int] + """ + def __init__(self, class_value, generics_manager): + super().__init__(generics_manager) + self._class_value = class_value + + def _get_wrapped_value(self): + return self._class_value + + def get_type_hint(self, add_class_info=True): + n = self.py__name__() + # Not sure if this is the best way to do this, but all of these types + # are a bit special in that they have type aliases and other ways to + # become lower case. It's probably better to make them upper case, + # because that's what you can use in annotations. 
+ n = dict(list="List", dict="Dict", set="Set", tuple="Tuple").get(n, n) + s = n + self._generics_manager.get_type_hint() + if add_class_info: + return 'Type[%s]' % s + return s + + def get_type_var_filter(self): + return _TypeVarFilter(self.get_generics(), self.list_type_vars()) + + def py__call__(self, arguments): + instance, = super().py__call__(arguments) + return ValueSet([_GenericInstanceWrapper(instance)]) + + def _as_context(self): + return _AnnotatedClassContext(self) + + @to_list + def py__bases__(self): + for base in self._wrapped_value.py__bases__(): + yield _LazyGenericBaseClass(self, base, self._generics_manager) + + def _create_instance_with_generics(self, generics_manager): + return GenericClass(self._class_value, generics_manager) + + def is_sub_class_of(self, class_value): + if super().is_sub_class_of(class_value): + return True + return self._class_value.is_sub_class_of(class_value) + + def with_generics(self, generics_tuple): + return self._class_value.with_generics(generics_tuple) + + def infer_type_vars(self, value_set): + # Circular + from jedi.inference.gradual.annotation import merge_pairwise_generics, merge_type_var_dicts + + annotation_name = self.py__name__() + type_var_dict = {} + if annotation_name == 'Iterable': + annotation_generics = self.get_generics() + if annotation_generics: + return annotation_generics[0].infer_type_vars( + value_set.merge_types_of_iterate(), + ) + else: + # Note: we need to handle the MRO _in order_, so we need to extract + # the elements from the set first, then handle them, even if we put + # them back in a set afterwards. + for py_class in value_set: + if py_class.is_instance() and not py_class.is_compiled(): + py_class = py_class.get_annotated_class_object() + else: + continue + + if py_class.api_type != 'class': + # Functions & modules don't have an MRO and we're not + # expecting a Callable (those are handled separately within + # TypingClassValueWithIndex). 
+ continue + + for parent_class in py_class.py__mro__(): + class_name = parent_class.py__name__() + if annotation_name == class_name: + merge_type_var_dicts( + type_var_dict, + merge_pairwise_generics(self, parent_class), + ) + break + + return type_var_dict + + +class _LazyGenericBaseClass: + def __init__(self, class_value, lazy_base_class, generics_manager): + self._class_value = class_value + self._lazy_base_class = lazy_base_class + self._generics_manager = generics_manager + + @iterator_to_value_set + def infer(self): + for base in self._lazy_base_class.infer(): + if isinstance(base, GenericClass): + # Here we have to recalculate the given types. + yield GenericClass.create_cached( + base.inference_state, + base._wrapped_value, + TupleGenericManager(tuple(self._remap_type_vars(base))), + ) + else: + if base.is_class_mixin(): + # This case basically allows classes like `class Foo(List)` + # to be used like `Foo[int]`. The generics are not + # necessary and can be used later. + yield GenericClass.create_cached( + base.inference_state, + base, + self._generics_manager, + ) + else: + yield base + + def _remap_type_vars(self, base): + from jedi.inference.gradual.type_var import TypeVar + filter = self._class_value.get_type_var_filter() + for type_var_set in base.get_generics(): + new = NO_VALUES + for type_var in type_var_set: + if isinstance(type_var, TypeVar): + names = filter.get(type_var.py__name__()) + new |= ValueSet.from_sets( + name.infer() for name in names + ) + else: + # Mostly will be type vars, except if in some cases + # a concrete type will already be there. In that + # case just add it to the value set. 
+ new |= ValueSet([type_var]) + yield new + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self._lazy_base_class) + + +class _GenericInstanceWrapper(ValueWrapper): + def py__stop_iteration_returns(self): + for cls in self._wrapped_value.class_value.py__mro__(): + if cls.py__name__() == 'Generator': + generics = cls.get_generics() + try: + return generics[2].execute_annotation() + except IndexError: + pass + elif cls.py__name__() == 'Iterator': + return ValueSet([builtin_from_name(self.inference_state, 'None')]) + return self._wrapped_value.py__stop_iteration_returns() + + def get_type_hint(self, add_class_info=True): + return self._wrapped_value.class_value.get_type_hint(add_class_info=False) + + +class _PseudoTreeNameClass(Value): + """ + In typeshed, some classes are defined like this: + + Tuple: _SpecialForm = ... + + Now this is not a real class, therefore we have to do some workarounds like + this class. Essentially this class makes it possible to goto that `Tuple` + name, without affecting anything else negatively. + """ + api_type = 'class' + + def __init__(self, parent_context, tree_name): + super().__init__( + parent_context.inference_state, + parent_context + ) + self._tree_name = tree_name + + @property + def tree_node(self): + return self._tree_name + + def get_filters(self, *args, **kwargs): + # TODO this is obviously wrong. Is it though? + class EmptyFilter(ClassFilter): + def __init__(self): + pass + + def get(self, name, **kwargs): + return [] + + def values(self, **kwargs): + return [] + + yield EmptyFilter() + + def py__class__(self): + # This might not be 100% correct, but it is good enough. The details of + # the typing library are not really an issue for Jedi. 
+ return builtin_from_name(self.inference_state, 'type') + + @property + def name(self): + return ValueName(self, self._tree_name) + + def get_qualified_names(self): + return (self._tree_name.value,) + + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, self._tree_name.value) + + +class BaseTypingValue(LazyValueWrapper): + def __init__(self, parent_context, tree_name): + self.inference_state = parent_context.inference_state + self.parent_context = parent_context + self._tree_name = tree_name + + @property + def name(self): + return ValueName(self, self._tree_name) + + def _get_wrapped_value(self): + return _PseudoTreeNameClass(self.parent_context, self._tree_name) + + def get_signatures(self): + return self._wrapped_value.get_signatures() + + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, self._tree_name.value) + + +class BaseTypingClassWithGenerics(DefineGenericBaseClass): + def __init__(self, parent_context, tree_name, generics_manager): + super().__init__(generics_manager) + self.inference_state = parent_context.inference_state + self.parent_context = parent_context + self._tree_name = tree_name + + def _get_wrapped_value(self): + return _PseudoTreeNameClass(self.parent_context, self._tree_name) + + def __repr__(self): + return '%s(%s%s)' % (self.__class__.__name__, self._tree_name.value, + self._generics_manager) + + +class BaseTypingInstance(LazyValueWrapper): + def __init__(self, parent_context, class_value, tree_name, generics_manager): + self.inference_state = class_value.inference_state + self.parent_context = parent_context + self._class_value = class_value + self._tree_name = tree_name + self._generics_manager = generics_manager + + def py__class__(self): + return self._class_value + + def get_annotated_class_object(self): + return self._class_value + + def get_qualified_names(self): + return (self.py__name__(),) + + @property + def name(self): + return ValueName(self, self._tree_name) + + def _get_wrapped_value(self): 
+ object_, = builtin_from_name(self.inference_state, 'object').execute_annotation() + return object_ + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self._generics_manager) diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/conversion.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/conversion.py new file mode 100644 index 000000000..9b36a2e1b --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/conversion.py @@ -0,0 +1,209 @@ +from jedi import debug +from jedi.inference.base_value import ValueSet, \ + NO_VALUES +from jedi.inference.utils import to_list +from jedi.inference.gradual.stub_value import StubModuleValue +from jedi.inference.gradual.typeshed import try_to_load_stub_cached +from jedi.inference.value.decorator import Decoratee + + +def _stub_to_python_value_set(stub_value, ignore_compiled=False): + stub_module_context = stub_value.get_root_context() + if not stub_module_context.is_stub(): + return ValueSet([stub_value]) + + decorates = None + if isinstance(stub_value, Decoratee): + decorates = stub_value._original_value + + was_instance = stub_value.is_instance() + if was_instance: + arguments = getattr(stub_value, '_arguments', None) + stub_value = stub_value.py__class__() + + qualified_names = stub_value.get_qualified_names() + if qualified_names is None: + return NO_VALUES + + was_bound_method = stub_value.is_bound_method() + if was_bound_method: + # Infer the object first. We can infer the method later. + method_name = qualified_names[-1] + qualified_names = qualified_names[:-1] + was_instance = True + arguments = None + + values = _infer_from_stub(stub_module_context, qualified_names, ignore_compiled) + if was_instance: + values = ValueSet.from_sets( + c.execute_with_values() if arguments is None else c.execute(arguments) + for c in values + if c.is_class() + ) + if was_bound_method: + # Now that the instance has been properly created, we can simply get + # the method. 
+ values = values.py__getattribute__(method_name) + if decorates is not None: + values = ValueSet(Decoratee(v, decorates) for v in values) + return values + + +def _infer_from_stub(stub_module_context, qualified_names, ignore_compiled): + from jedi.inference.compiled.mixed import MixedObject + stub_module = stub_module_context.get_value() + assert isinstance(stub_module, (StubModuleValue, MixedObject)), stub_module_context + non_stubs = stub_module.non_stub_value_set + if ignore_compiled: + non_stubs = non_stubs.filter(lambda c: not c.is_compiled()) + for name in qualified_names: + non_stubs = non_stubs.py__getattribute__(name) + return non_stubs + + +@to_list +def _try_stub_to_python_names(names, prefer_stub_to_compiled=False): + for name in names: + module_context = name.get_root_context() + if not module_context.is_stub(): + yield name + continue + + if name.api_type == 'module': + values = convert_values(name.infer(), ignore_compiled=prefer_stub_to_compiled) + if values: + for v in values: + yield v.name + continue + else: + v = name.get_defining_qualified_value() + if v is not None: + converted = _stub_to_python_value_set(v, ignore_compiled=prefer_stub_to_compiled) + if converted: + converted_names = converted.goto(name.get_public_name()) + if converted_names: + for n in converted_names: + if n.get_root_context().is_stub(): + # If it's a stub again, it means we're going in + # a circle. Probably some imports make it a + # stub again. 
+ yield name + else: + yield n + continue + yield name + + +def _load_stub_module(module): + if module.is_stub(): + return module + return try_to_load_stub_cached( + module.inference_state, + import_names=module.string_names, + python_value_set=ValueSet([module]), + parent_module_value=None, + sys_path=module.inference_state.get_sys_path(), + ) + + +@to_list +def _python_to_stub_names(names, fallback_to_python=False): + for name in names: + module_context = name.get_root_context() + if module_context.is_stub(): + yield name + continue + + if name.api_type == 'module': + found_name = False + for n in name.goto(): + if n.api_type == 'module': + values = convert_values(n.infer(), only_stubs=True) + for v in values: + yield v.name + found_name = True + else: + for x in _python_to_stub_names([n], fallback_to_python=fallback_to_python): + yield x + found_name = True + if found_name: + continue + else: + v = name.get_defining_qualified_value() + if v is not None: + converted = to_stub(v) + if converted: + converted_names = converted.goto(name.get_public_name()) + if converted_names: + yield from converted_names + continue + if fallback_to_python: + # This is the part where if we haven't found anything, just return + # the stub name. 
+ yield name + + +def convert_names(names, only_stubs=False, prefer_stubs=False, prefer_stub_to_compiled=True): + if only_stubs and prefer_stubs: + raise ValueError("You cannot use both of only_stubs and prefer_stubs.") + + with debug.increase_indent_cm('convert names'): + if only_stubs or prefer_stubs: + return _python_to_stub_names(names, fallback_to_python=prefer_stubs) + else: + return _try_stub_to_python_names( + names, prefer_stub_to_compiled=prefer_stub_to_compiled) + + +def convert_values(values, only_stubs=False, prefer_stubs=False, ignore_compiled=True): + assert not (only_stubs and prefer_stubs) + with debug.increase_indent_cm('convert values'): + if only_stubs or prefer_stubs: + return ValueSet.from_sets( + to_stub(value) + or (ValueSet({value}) if prefer_stubs else NO_VALUES) + for value in values + ) + else: + return ValueSet.from_sets( + _stub_to_python_value_set(stub_value, ignore_compiled=ignore_compiled) + or ValueSet({stub_value}) + for stub_value in values + ) + + +def to_stub(value): + if value.is_stub(): + return ValueSet([value]) + + was_instance = value.is_instance() + if was_instance: + value = value.py__class__() + + qualified_names = value.get_qualified_names() + stub_module = _load_stub_module(value.get_root_context().get_value()) + if stub_module is None or qualified_names is None: + return NO_VALUES + + was_bound_method = value.is_bound_method() + if was_bound_method: + # Infer the object first. We can infer the method later. + method_name = qualified_names[-1] + qualified_names = qualified_names[:-1] + was_instance = True + + stub_values = ValueSet([stub_module]) + for name in qualified_names: + stub_values = stub_values.py__getattribute__(name) + + if was_instance: + stub_values = ValueSet.from_sets( + c.execute_with_values() + for c in stub_values + if c.is_class() + ) + if was_bound_method: + # Now that the instance has been properly created, we can simply get + # the method. 
+ stub_values = stub_values.py__getattribute__(method_name) + return stub_values diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/generics.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/generics.py new file mode 100644 index 000000000..f4a5ae9c0 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/generics.py @@ -0,0 +1,101 @@ +""" +This module is about generics, like the `int` in `List[int]`. It's not about +the Generic class. +""" + +from jedi import debug +from jedi.cache import memoize_method +from jedi.inference.utils import to_tuple +from jedi.inference.base_value import ValueSet, NO_VALUES +from jedi.inference.value.iterable import SequenceLiteralValue +from jedi.inference.helpers import is_string + + +def _resolve_forward_references(context, value_set): + for value in value_set: + if is_string(value): + from jedi.inference.gradual.annotation import _get_forward_reference_node + node = _get_forward_reference_node(context, value.get_safe_value()) + if node is not None: + for c in context.infer_node(node): + yield c + else: + yield value + + +class _AbstractGenericManager: + def get_index_and_execute(self, index): + try: + return self[index].execute_annotation() + except IndexError: + debug.warning('No param #%s found for annotation %s', index, self) + return NO_VALUES + + def get_type_hint(self): + return '[%s]' % ', '.join(t.get_type_hint(add_class_info=False) for t in self.to_tuple()) + + +class LazyGenericManager(_AbstractGenericManager): + def __init__(self, context_of_index, index_value): + self._context_of_index = context_of_index + self._index_value = index_value + + @memoize_method + def __getitem__(self, index): + return self._tuple()[index]() + + def __len__(self): + return len(self._tuple()) + + @memoize_method + @to_tuple + def _tuple(self): + def lambda_scoping_in_for_loop_sucks(lazy_value): + return lambda: ValueSet(_resolve_forward_references( + self._context_of_index, + lazy_value.infer() + )) + + 
if isinstance(self._index_value, SequenceLiteralValue): + for lazy_value in self._index_value.py__iter__(contextualized_node=None): + yield lambda_scoping_in_for_loop_sucks(lazy_value) + else: + yield lambda: ValueSet(_resolve_forward_references( + self._context_of_index, + ValueSet([self._index_value]) + )) + + @to_tuple + def to_tuple(self): + for callable_ in self._tuple(): + yield callable_() + + def is_homogenous_tuple(self): + if isinstance(self._index_value, SequenceLiteralValue): + entries = self._index_value.get_tree_entries() + if len(entries) == 2 and entries[1] == '...': + return True + return False + + def __repr__(self): + return '[%s]' % (', '.join(repr(x) for x in self.to_tuple())) + + +class TupleGenericManager(_AbstractGenericManager): + def __init__(self, tup): + self._tuple = tup + + def __getitem__(self, index): + return self._tuple[index] + + def __len__(self): + return len(self._tuple) + + def to_tuple(self): + return self._tuple + + def is_homogenous_tuple(self): + return False + + def __repr__(self): + return '[%s]' % (', '.join(repr(x) for x in self.to_tuple())) diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/stub_value.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/stub_value.py new file mode 100644 index 000000000..c2e2bc4e2 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/stub_value.py @@ -0,0 +1,100 @@ +from jedi.inference.base_value import ValueWrapper +from jedi.inference.value.module import ModuleValue +from jedi.inference.filters import ParserTreeFilter +from jedi.inference.names import StubName, StubModuleName +from jedi.inference.gradual.typing import TypingModuleFilterWrapper +from jedi.inference.context import ModuleContext + + +class StubModuleValue(ModuleValue): + _module_name_class = StubModuleName + + def __init__(self, non_stub_value_set, *args, **kwargs): + super().__init__(*args, **kwargs) + self.non_stub_value_set = non_stub_value_set + + def is_stub(self): + return True 
+ + def sub_modules_dict(self): + """ + We have to overwrite this, because it's possible to have stubs that + don't have code for all the child modules. At the time of writing this + there are for example no stubs for `json.tool`. + """ + names = {} + for value in self.non_stub_value_set: + try: + method = value.sub_modules_dict + except AttributeError: + pass + else: + names.update(method()) + names.update(super().sub_modules_dict()) + return names + + def _get_stub_filters(self, origin_scope): + return [StubFilter( + parent_context=self.as_context(), + origin_scope=origin_scope + )] + list(self.iter_star_filters()) + + def get_filters(self, origin_scope=None): + filters = super().get_filters(origin_scope) + next(filters, None) # Ignore the first filter and replace it with our own + stub_filters = self._get_stub_filters(origin_scope=origin_scope) + yield from stub_filters + yield from filters + + def _as_context(self): + return StubModuleContext(self) + + +class StubModuleContext(ModuleContext): + def get_filters(self, until_position=None, origin_scope=None): + # Make sure to ignore the position, because positions are not relevant + # for stubs. + return super().get_filters(origin_scope=origin_scope) + + +class TypingModuleWrapper(StubModuleValue): + def get_filters(self, *args, **kwargs): + filters = super().get_filters(*args, **kwargs) + f = next(filters, None) + assert f is not None + yield TypingModuleFilterWrapper(f) + yield from filters + + def _as_context(self): + return TypingModuleContext(self) + + +class TypingModuleContext(ModuleContext): + def get_filters(self, *args, **kwargs): + filters = super().get_filters(*args, **kwargs) + yield TypingModuleFilterWrapper(next(filters, None)) + yield from filters + + +class StubFilter(ParserTreeFilter): + name_class = StubName + + def _is_name_reachable(self, name): + if not super()._is_name_reachable(name): + return False + + # Imports in stub files are only public if they have an "as" + # export. 
+ definition = name.get_definition() + if definition.type in ('import_from', 'import_name'): + if name.parent.type not in ('import_as_name', 'dotted_as_name'): + return False + n = name.value + # TODO rewrite direct return + if n.startswith('_') and not (n.startswith('__') and n.endswith('__')): + return False + return True + + +class VersionInfo(ValueWrapper): + pass diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/type_var.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/type_var.py new file mode 100644 index 000000000..c09773f1d --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/type_var.py @@ -0,0 +1,127 @@ +from jedi import debug +from jedi.inference.base_value import ValueSet, NO_VALUES, ValueWrapper +from jedi.inference.gradual.base import BaseTypingValue + + +class TypeVarClass(ValueWrapper): + def py__call__(self, arguments): + unpacked = arguments.unpack() + + key, lazy_value = next(unpacked, (None, None)) + var_name = self._find_string_name(lazy_value) + # The name must be given, otherwise it's useless. 
+        if var_name is None or key is not None:
+            debug.warning('Found a variable without a name %s', arguments)
+            return NO_VALUES
+
+        return ValueSet([TypeVar.create_cached(
+            self.inference_state,
+            self.parent_context,
+            tree_name=self.tree_node.name,
+            var_name=var_name,
+            unpacked_args=unpacked,
+        )])
+
+    def _find_string_name(self, lazy_value):
+        if lazy_value is None:
+            return None
+
+        value_set = lazy_value.infer()
+        if not value_set:
+            return None
+        if len(value_set) > 1:
+            debug.warning('Found multiple values for a type variable: %s', value_set)
+
+        name_value = next(iter(value_set))
+        try:
+            method = name_value.get_safe_value
+        except AttributeError:
+            return None
+        else:
+            safe_value = method(default=None)
+            if isinstance(safe_value, str):
+                return safe_value
+            return None
+
+
+class TypeVar(BaseTypingValue):
+    def __init__(self, parent_context, tree_name, var_name, unpacked_args):
+        super().__init__(parent_context, tree_name)
+        self._var_name = var_name
+
+        self._constraints_lazy_values = []
+        self._bound_lazy_value = None
+        self._covariant_lazy_value = None
+        self._contravariant_lazy_value = None
+        for key, lazy_value in unpacked_args:
+            if key is None:
+                self._constraints_lazy_values.append(lazy_value)
+            else:
+                if key == 'bound':
+                    self._bound_lazy_value = lazy_value
+                elif key == 'covariant':
+                    self._covariant_lazy_value = lazy_value
+                elif key == 'contravariant':
+                    self._contravariant_lazy_value = lazy_value
+                else:
+                    debug.warning('Invalid TypeVar param name %s', key)
+
+    def py__name__(self):
+        return self._var_name
+
+    def get_filters(self, *args, **kwargs):
+        return iter([])
+
+    def _get_classes(self):
+        if self._bound_lazy_value is not None:
+            return self._bound_lazy_value.infer()
+        if self._constraints_lazy_values:
+            return self.constraints
+        debug.warning('Tried to infer the TypeVar %s without a given type', self._var_name)
+        return NO_VALUES
+
+    def is_same_class(self, other):
+        # Everything can match an undefined type var.
+ return True + + @property + def constraints(self): + return ValueSet.from_sets( + lazy.infer() for lazy in self._constraints_lazy_values + ) + + def define_generics(self, type_var_dict): + try: + found = type_var_dict[self.py__name__()] + except KeyError: + pass + else: + if found: + return found + return ValueSet({self}) + + def execute_annotation(self): + return self._get_classes().execute_annotation() + + def infer_type_vars(self, value_set): + def iterate(): + for v in value_set: + cls = v.py__class__() + if v.is_function() or v.is_class(): + cls = TypeWrapper(cls, v) + yield cls + + annotation_name = self.py__name__() + return {annotation_name: ValueSet(iterate())} + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.py__name__()) + + +class TypeWrapper(ValueWrapper): + def __init__(self, wrapped_value, original_value): + super().__init__(wrapped_value) + self._original_value = original_value + + def execute_annotation(self): + return ValueSet({self._original_value}) diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/typeshed.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/typeshed.py new file mode 100644 index 000000000..50217cd34 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/typeshed.py @@ -0,0 +1,310 @@ +import os +import re +from functools import wraps +from collections import namedtuple +from typing import Dict, Mapping, Tuple +from pathlib import Path + +from jedi import settings +from jedi.file_io import FileIO +from jedi.parser_utils import get_cached_code_lines +from jedi.inference.base_value import ValueSet, NO_VALUES +from jedi.inference.gradual.stub_value import TypingModuleWrapper, StubModuleValue +from jedi.inference.value import ModuleValue + +_jedi_path = Path(__file__).parent.parent.parent +TYPESHED_PATH = _jedi_path.joinpath('third_party', 'typeshed') +DJANGO_INIT_PATH = _jedi_path.joinpath('third_party', 'django-stubs', + 'django-stubs', '__init__.pyi') + +_IMPORT_MAP 
= dict( + _collections='collections', + _socket='socket', +) + +PathInfo = namedtuple('PathInfo', 'path is_third_party') + + +def _merge_create_stub_map(path_infos): + map_ = {} + for directory_path_info in path_infos: + map_.update(_create_stub_map(directory_path_info)) + return map_ + + +def _create_stub_map(directory_path_info): + """ + Create a mapping of an importable name in Python to a stub file. + """ + def generate(): + try: + listed = os.listdir(directory_path_info.path) + except (FileNotFoundError, NotADirectoryError): + return + + for entry in listed: + path = os.path.join(directory_path_info.path, entry) + if os.path.isdir(path): + init = os.path.join(path, '__init__.pyi') + if os.path.isfile(init): + yield entry, PathInfo(init, directory_path_info.is_third_party) + elif entry.endswith('.pyi') and os.path.isfile(path): + name = entry[:-4] + if name != '__init__': + yield name, PathInfo(path, directory_path_info.is_third_party) + + # Create a dictionary from the tuple generator. + return dict(generate()) + + +def _get_typeshed_directories(version_info): + check_version_list = ['2and3', '3'] + for base in ['stdlib', 'third_party']: + base_path = TYPESHED_PATH.joinpath(base) + base_list = os.listdir(base_path) + for base_list_entry in base_list: + match = re.match(r'(\d+)\.(\d+)$', base_list_entry) + if match is not None: + if match.group(1) == '3' and int(match.group(2)) <= version_info.minor: + check_version_list.append(base_list_entry) + + for check_version in check_version_list: + is_third_party = base != 'stdlib' + yield PathInfo(str(base_path.joinpath(check_version)), is_third_party) + + +_version_cache: Dict[Tuple[int, int], Mapping[str, PathInfo]] = {} + + +def _cache_stub_file_map(version_info): + """ + Returns a map of an importable name in Python to a stub file. + """ + # TODO this caches the stub files indefinitely, maybe use a time cache + # for that? 
+ version = version_info[:2] + try: + return _version_cache[version] + except KeyError: + pass + + _version_cache[version] = file_set = \ + _merge_create_stub_map(_get_typeshed_directories(version_info)) + return file_set + + +def import_module_decorator(func): + @wraps(func) + def wrapper(inference_state, import_names, parent_module_value, sys_path, prefer_stubs): + python_value_set = inference_state.module_cache.get(import_names) + if python_value_set is None: + if parent_module_value is not None and parent_module_value.is_stub(): + parent_module_values = parent_module_value.non_stub_value_set + else: + parent_module_values = [parent_module_value] + if import_names == ('os', 'path'): + # This is a huge exception, we follow a nested import + # ``os.path``, because it's a very important one in Python + # that is being achieved by messing with ``sys.modules`` in + # ``os``. + python_value_set = ValueSet.from_sets( + func(inference_state, (n,), None, sys_path,) + for n in ['posixpath', 'ntpath', 'macpath', 'os2emxpath'] + ) + else: + python_value_set = ValueSet.from_sets( + func(inference_state, import_names, p, sys_path,) + for p in parent_module_values + ) + inference_state.module_cache.add(import_names, python_value_set) + + if not prefer_stubs or import_names[0] in settings.auto_import_modules: + return python_value_set + + stub = try_to_load_stub_cached(inference_state, import_names, python_value_set, + parent_module_value, sys_path) + if stub is not None: + return ValueSet([stub]) + return python_value_set + + return wrapper + + +def try_to_load_stub_cached(inference_state, import_names, *args, **kwargs): + if import_names is None: + return None + + try: + return inference_state.stub_module_cache[import_names] + except KeyError: + pass + + # TODO is this needed? where are the exceptions coming from that make this + # necessary? Just remove this line. 
+ inference_state.stub_module_cache[import_names] = None + inference_state.stub_module_cache[import_names] = result = \ + _try_to_load_stub(inference_state, import_names, *args, **kwargs) + return result + + +def _try_to_load_stub(inference_state, import_names, python_value_set, + parent_module_value, sys_path): + """ + Trying to load a stub for a set of import_names. + + This is modelled to work like "PEP 561 -- Distributing and Packaging Type + Information", see https://www.python.org/dev/peps/pep-0561. + """ + if parent_module_value is None and len(import_names) > 1: + try: + parent_module_value = try_to_load_stub_cached( + inference_state, import_names[:-1], NO_VALUES, + parent_module_value=None, sys_path=sys_path) + except KeyError: + pass + + # 1. Try to load foo-stubs folders on path for import name foo. + if len(import_names) == 1: + # foo-stubs + for p in sys_path: + init = os.path.join(p, *import_names) + '-stubs' + os.path.sep + '__init__.pyi' + m = _try_to_load_stub_from_file( + inference_state, + python_value_set, + file_io=FileIO(init), + import_names=import_names, + ) + if m is not None: + return m + if import_names[0] == 'django' and python_value_set: + return _try_to_load_stub_from_file( + inference_state, + python_value_set, + file_io=FileIO(str(DJANGO_INIT_PATH)), + import_names=import_names, + ) + + # 2. Try to load pyi files next to py files. + for c in python_value_set: + try: + method = c.py__file__ + except AttributeError: + pass + else: + file_path = method() + file_paths = [] + if c.is_namespace(): + file_paths = [os.path.join(p, '__init__.pyi') for p in c.py__path__()] + elif file_path is not None and file_path.suffix == '.py': + file_paths = [str(file_path) + 'i'] + + for file_path in file_paths: + m = _try_to_load_stub_from_file( + inference_state, + python_value_set, + # The file path should end with .pyi + file_io=FileIO(file_path), + import_names=import_names, + ) + if m is not None: + return m + + # 3. 
Try to load typeshed + m = _load_from_typeshed(inference_state, python_value_set, parent_module_value, import_names) + if m is not None: + return m + + # 4. Try to load pyi file somewhere if python_value_set was not defined. + if not python_value_set: + if parent_module_value is not None: + check_path = parent_module_value.py__path__() or [] + # In case import_names + names_for_path = (import_names[-1],) + else: + check_path = sys_path + names_for_path = import_names + + for p in check_path: + m = _try_to_load_stub_from_file( + inference_state, + python_value_set, + file_io=FileIO(os.path.join(p, *names_for_path) + '.pyi'), + import_names=import_names, + ) + if m is not None: + return m + + # If no stub is found, that's fine, the calling function has to deal with + # it. + return None + + +def _load_from_typeshed(inference_state, python_value_set, parent_module_value, import_names): + import_name = import_names[-1] + map_ = None + if len(import_names) == 1: + map_ = _cache_stub_file_map(inference_state.grammar.version_info) + import_name = _IMPORT_MAP.get(import_name, import_name) + elif isinstance(parent_module_value, ModuleValue): + if not parent_module_value.is_package(): + # Only if it's a package (= a folder) something can be + # imported. + return None + paths = parent_module_value.py__path__() + # Once the initial package has been loaded, the sub packages will + # always be loaded, regardless if they are there or not. This makes + # sense, IMO, because stubs take preference, even if the original + # library doesn't provide a module (it could be dynamic). 
~dave + map_ = _merge_create_stub_map([PathInfo(p, is_third_party=False) for p in paths]) + + if map_ is not None: + path_info = map_.get(import_name) + if path_info is not None and (not path_info.is_third_party or python_value_set): + return _try_to_load_stub_from_file( + inference_state, + python_value_set, + file_io=FileIO(path_info.path), + import_names=import_names, + ) + + +def _try_to_load_stub_from_file(inference_state, python_value_set, file_io, import_names): + try: + stub_module_node = parse_stub_module(inference_state, file_io) + except OSError: + # The file that you're looking for doesn't exist (anymore). + return None + else: + return create_stub_module( + inference_state, inference_state.latest_grammar, python_value_set, + stub_module_node, file_io, import_names + ) + + +def parse_stub_module(inference_state, file_io): + return inference_state.parse( + file_io=file_io, + cache=True, + diff_cache=settings.fast_parser, + cache_path=settings.cache_directory, + use_latest_grammar=True + ) + + +def create_stub_module(inference_state, grammar, python_value_set, + stub_module_node, file_io, import_names): + if import_names == ('typing',): + module_cls = TypingModuleWrapper + else: + module_cls = StubModuleValue + file_name = os.path.basename(file_io.path) + stub_module_value = module_cls( + python_value_set, inference_state, stub_module_node, + file_io=file_io, + string_names=import_names, + # The code was loaded with latest_grammar, so use + # that. + code_lines=get_cached_code_lines(grammar, file_io.path), + is_package=file_name == '__init__.pyi', + ) + return stub_module_value diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/typing.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/typing.py new file mode 100644 index 000000000..c2f08a334 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/typing.py @@ -0,0 +1,485 @@ +""" +We need to somehow work with the typing objects. 
Since the typing objects are +pretty bare we need to add all the Jedi customizations to make them work as +values. + +This file deals with all the typing.py cases. +""" +import itertools + +from jedi import debug +from jedi.inference.compiled import builtin_from_name, create_simple_object +from jedi.inference.base_value import ValueSet, NO_VALUES, Value, \ + LazyValueWrapper, ValueWrapper +from jedi.inference.lazy_value import LazyKnownValues +from jedi.inference.arguments import repack_with_argument_clinic +from jedi.inference.filters import FilterWrapper +from jedi.inference.names import NameWrapper, ValueName +from jedi.inference.value.klass import ClassMixin +from jedi.inference.gradual.base import BaseTypingValue, \ + BaseTypingClassWithGenerics, BaseTypingInstance +from jedi.inference.gradual.type_var import TypeVarClass +from jedi.inference.gradual.generics import LazyGenericManager, TupleGenericManager + +_PROXY_CLASS_TYPES = 'Tuple Generic Protocol Callable Type'.split() +_TYPE_ALIAS_TYPES = { + 'List': 'builtins.list', + 'Dict': 'builtins.dict', + 'Set': 'builtins.set', + 'FrozenSet': 'builtins.frozenset', + 'ChainMap': 'collections.ChainMap', + 'Counter': 'collections.Counter', + 'DefaultDict': 'collections.defaultdict', + 'Deque': 'collections.deque', +} +_PROXY_TYPES = 'Optional Union ClassVar'.split() + + +class TypingModuleName(NameWrapper): + def infer(self): + return ValueSet(self._remap()) + + def _remap(self): + name = self.string_name + inference_state = self.parent_context.inference_state + try: + actual = _TYPE_ALIAS_TYPES[name] + except KeyError: + pass + else: + yield TypeAlias.create_cached( + inference_state, self.parent_context, self.tree_name, actual) + return + + if name in _PROXY_CLASS_TYPES: + yield ProxyTypingClassValue.create_cached( + inference_state, self.parent_context, self.tree_name) + elif name in _PROXY_TYPES: + yield ProxyTypingValue.create_cached( + inference_state, self.parent_context, self.tree_name) + elif name == 
'runtime': + # We don't want anything here, not sure what this function is + # supposed to do, since it just appears in the stubs and shouldn't + # have any effects there (because it's never executed). + return + elif name == 'TypeVar': + cls, = self._wrapped_name.infer() + yield TypeVarClass.create_cached(inference_state, cls) + elif name == 'Any': + yield AnyClass.create_cached( + inference_state, self.parent_context, self.tree_name) + elif name == 'TYPE_CHECKING': + # This is needed for e.g. imports that are only available for type + # checking or are in cycles. The user can then check this variable. + yield builtin_from_name(inference_state, 'True') + elif name == 'overload': + yield OverloadFunction.create_cached( + inference_state, self.parent_context, self.tree_name) + elif name == 'NewType': + v, = self._wrapped_name.infer() + yield NewTypeFunction.create_cached(inference_state, v) + elif name == 'cast': + cast_fn, = self._wrapped_name.infer() + yield CastFunction.create_cached(inference_state, cast_fn) + elif name == 'TypedDict': + # TODO doesn't even exist in typeshed/typing.py, yet. But will be + # added soon. + yield TypedDictClass.create_cached( + inference_state, self.parent_context, self.tree_name) + else: + # Not necessary, as long as we are not doing type checking: + # no_type_check & no_type_check_decorator + # Everything else shouldn't be relevant... + yield from self._wrapped_name.infer() + + +class TypingModuleFilterWrapper(FilterWrapper): + name_wrapper_class = TypingModuleName + + +class ProxyWithGenerics(BaseTypingClassWithGenerics): + def execute_annotation(self): + string_name = self._tree_name.value + + if string_name == 'Union': + # This is kind of a special case, because we have Unions (in Jedi + # ValueSets). + return self.gather_annotation_classes().execute_annotation() + elif string_name == 'Optional': + # Optional is basically just saying it's either None or the actual + # type. 
+ return self.gather_annotation_classes().execute_annotation() \ + | ValueSet([builtin_from_name(self.inference_state, 'None')]) + elif string_name == 'Type': + # The type is actually already given in the index_value + return self._generics_manager[0] + elif string_name == 'ClassVar': + # For now don't do anything here, ClassVars are always used. + return self._generics_manager[0].execute_annotation() + + mapped = { + 'Tuple': Tuple, + 'Generic': Generic, + 'Protocol': Protocol, + 'Callable': Callable, + } + cls = mapped[string_name] + return ValueSet([cls( + self.parent_context, + self, + self._tree_name, + generics_manager=self._generics_manager, + )]) + + def gather_annotation_classes(self): + return ValueSet.from_sets(self._generics_manager.to_tuple()) + + def _create_instance_with_generics(self, generics_manager): + return ProxyWithGenerics( + self.parent_context, + self._tree_name, + generics_manager + ) + + def infer_type_vars(self, value_set): + annotation_generics = self.get_generics() + + if not annotation_generics: + return {} + + annotation_name = self.py__name__() + if annotation_name == 'Optional': + # Optional[T] is equivalent to Union[T, None]. In Jedi unions + # are represented by members within a ValueSet, so we extract + # the T from the Optional[T] by removing the None value. 
+ none = builtin_from_name(self.inference_state, 'None') + return annotation_generics[0].infer_type_vars( + value_set.filter(lambda x: x != none), + ) + + return {} + + +class ProxyTypingValue(BaseTypingValue): + index_class = ProxyWithGenerics + + def with_generics(self, generics_tuple): + return self.index_class.create_cached( + self.inference_state, + self.parent_context, + self._tree_name, + generics_manager=TupleGenericManager(generics_tuple) + ) + + def py__getitem__(self, index_value_set, contextualized_node): + return ValueSet( + self.index_class.create_cached( + self.inference_state, + self.parent_context, + self._tree_name, + generics_manager=LazyGenericManager( + context_of_index=contextualized_node.context, + index_value=index_value, + ) + ) for index_value in index_value_set + ) + + +class _TypingClassMixin(ClassMixin): + def py__bases__(self): + return [LazyKnownValues( + self.inference_state.builtins_module.py__getattribute__('object') + )] + + def get_metaclasses(self): + return [] + + @property + def name(self): + return ValueName(self, self._tree_name) + + +class TypingClassWithGenerics(ProxyWithGenerics, _TypingClassMixin): + def infer_type_vars(self, value_set): + type_var_dict = {} + annotation_generics = self.get_generics() + + if not annotation_generics: + return type_var_dict + + annotation_name = self.py__name__() + if annotation_name == 'Type': + return annotation_generics[0].infer_type_vars( + # This is basically a trick to avoid extra code: We execute the + # incoming classes to be able to use the normal code for type + # var inference. 
+ value_set.execute_annotation(), + ) + + elif annotation_name == 'Callable': + if len(annotation_generics) == 2: + return annotation_generics[1].infer_type_vars( + value_set.execute_annotation(), + ) + + elif annotation_name == 'Tuple': + tuple_annotation, = self.execute_annotation() + return tuple_annotation.infer_type_vars(value_set) + + return type_var_dict + + def _create_instance_with_generics(self, generics_manager): + return TypingClassWithGenerics( + self.parent_context, + self._tree_name, + generics_manager + ) + + +class ProxyTypingClassValue(ProxyTypingValue, _TypingClassMixin): + index_class = TypingClassWithGenerics + + +class TypeAlias(LazyValueWrapper): + def __init__(self, parent_context, origin_tree_name, actual): + self.inference_state = parent_context.inference_state + self.parent_context = parent_context + self._origin_tree_name = origin_tree_name + self._actual = actual # e.g. builtins.list + + @property + def name(self): + return ValueName(self, self._origin_tree_name) + + def py__name__(self): + return self.name.string_name + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self._actual) + + def _get_wrapped_value(self): + module_name, class_name = self._actual.split('.') + + # TODO use inference_state.import_module? + from jedi.inference.imports import Importer + module, = Importer( + self.inference_state, [module_name], self.inference_state.builtins_module + ).follow() + classes = module.py__getattribute__(class_name) + # There should only be one, because it's code that we control. + assert len(classes) == 1, classes + cls = next(iter(classes)) + return cls + + def gather_annotation_classes(self): + return ValueSet([self._get_wrapped_value()]) + + def get_signatures(self): + return [] + + +class Callable(BaseTypingInstance): + def py__call__(self, arguments): + """ + def x() -> Callable[[Callable[..., _T]], _T]: ... + """ + # The 0th index are the arguments. 
+ try: + param_values = self._generics_manager[0] + result_values = self._generics_manager[1] + except IndexError: + debug.warning('Callable[...] defined without two arguments') + return NO_VALUES + else: + from jedi.inference.gradual.annotation import infer_return_for_callable + return infer_return_for_callable(arguments, param_values, result_values) + + +class Tuple(BaseTypingInstance): + def _is_homogenous(self): + # To specify a variable-length tuple of homogeneous type, Tuple[T, ...] + # is used. + return self._generics_manager.is_homogenous_tuple() + + def py__simple_getitem__(self, index): + if self._is_homogenous(): + return self._generics_manager.get_index_and_execute(0) + else: + if isinstance(index, int): + return self._generics_manager.get_index_and_execute(index) + + debug.dbg('The getitem type on Tuple was %s' % index) + return NO_VALUES + + def py__iter__(self, contextualized_node=None): + if self._is_homogenous(): + yield LazyKnownValues(self._generics_manager.get_index_and_execute(0)) + else: + for v in self._generics_manager.to_tuple(): + yield LazyKnownValues(v.execute_annotation()) + + def py__getitem__(self, index_value_set, contextualized_node): + if self._is_homogenous(): + return self._generics_manager.get_index_and_execute(0) + + return ValueSet.from_sets( + self._generics_manager.to_tuple() + ).execute_annotation() + + def _get_wrapped_value(self): + tuple_, = self.inference_state.builtins_module \ + .py__getattribute__('tuple').execute_annotation() + return tuple_ + + @property + def name(self): + return self._wrapped_value.name + + def infer_type_vars(self, value_set): + # Circular + from jedi.inference.gradual.annotation import merge_pairwise_generics, merge_type_var_dicts + + value_set = value_set.filter( + lambda x: x.py__name__().lower() == 'tuple', + ) + + if self._is_homogenous(): + # The parameter annotation is of the form `Tuple[T, ...]`, + # so we treat the incoming tuple like a iterable sequence + # rather than a positional 
container of elements. + return self._class_value.get_generics()[0].infer_type_vars( + value_set.merge_types_of_iterate(), + ) + + else: + # The parameter annotation has only explicit type parameters + # (e.g: `Tuple[T]`, `Tuple[T, U]`, `Tuple[T, U, V]`, etc.) so we + # treat the incoming values as needing to match the annotation + # exactly, just as we would for non-tuple annotations. + + type_var_dict = {} + for element in value_set: + try: + method = element.get_annotated_class_object + except AttributeError: + # This might still happen, because the tuple name matching + # above is not 100% correct, so just catch the remaining + # cases here. + continue + + py_class = method() + merge_type_var_dicts( + type_var_dict, + merge_pairwise_generics(self._class_value, py_class), + ) + + return type_var_dict + + +class Generic(BaseTypingInstance): + pass + + +class Protocol(BaseTypingInstance): + pass + + +class AnyClass(BaseTypingValue): + def execute_annotation(self): + debug.warning('Used Any - returned no results') + return NO_VALUES + + +class OverloadFunction(BaseTypingValue): + @repack_with_argument_clinic('func, /') + def py__call__(self, func_value_set): + # Just pass arguments through. 
+        return func_value_set
+
+
+class NewTypeFunction(ValueWrapper):
+    def py__call__(self, arguments):
+        ordered_args = arguments.unpack()
+        next(ordered_args, (None, None))
+        _, second_arg = next(ordered_args, (None, None))
+        if second_arg is None:
+            return NO_VALUES
+        return ValueSet(
+            NewType(
+                self.inference_state,
+                contextualized_node.context,
+                contextualized_node.node,
+                second_arg.infer(),
+            ) for contextualized_node in arguments.get_calling_nodes())
+
+
+class NewType(Value):
+    def __init__(self, inference_state, parent_context, tree_node, type_value_set):
+        super().__init__(inference_state, parent_context)
+        self._type_value_set = type_value_set
+        self.tree_node = tree_node
+
+    def py__class__(self):
+        c, = self._type_value_set.py__class__()
+        return c
+
+    def py__call__(self, arguments):
+        return self._type_value_set.execute_annotation()
+
+    @property
+    def name(self):
+        from jedi.inference.compiled.value import CompiledValueName
+        return CompiledValueName(self, 'NewType')
+
+    def __repr__(self) -> str:
+        return '<NewType: %s>%s' % (self.tree_node, self._type_value_set)
+
+
+class CastFunction(ValueWrapper):
+    @repack_with_argument_clinic('type, object, /')
+    def py__call__(self, type_value_set, object_value_set):
+        return type_value_set.execute_annotation()
+
+
+class TypedDictClass(BaseTypingValue):
+    """
+    This class has no responsibilities and is just here to make sure that typed
+    dicts can be identified.
+ """ + + +class TypedDict(LazyValueWrapper): + """Represents the instance version of ``TypedDictClass``.""" + def __init__(self, definition_class): + self.inference_state = definition_class.inference_state + self.parent_context = definition_class.parent_context + self.tree_node = definition_class.tree_node + self._definition_class = definition_class + + @property + def name(self): + return ValueName(self, self.tree_node.name) + + def py__simple_getitem__(self, index): + if isinstance(index, str): + return ValueSet.from_sets( + name.infer() + for filter in self._definition_class.get_filters(is_instance=True) + for name in filter.get(index) + ) + return NO_VALUES + + def get_key_values(self): + filtered_values = itertools.chain.from_iterable(( + f.values() + for f in self._definition_class.get_filters(is_instance=True) + )) + return ValueSet({ + create_simple_object(self.inference_state, v.string_name) + for v in filtered_values + }) + + def _get_wrapped_value(self): + d, = self.inference_state.builtins_module.py__getattribute__('dict') + result, = d.execute_with_values() + return result diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/utils.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/utils.py new file mode 100644 index 000000000..af3703c7a --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/gradual/utils.py @@ -0,0 +1,35 @@ +from pathlib import Path + +from jedi.inference.gradual.typeshed import TYPESHED_PATH, create_stub_module + + +def load_proper_stub_module(inference_state, grammar, file_io, import_names, module_node): + """ + This function is given a random .pyi file and should return the proper + module. 
+ """ + path = file_io.path + path = Path(path) + assert path.suffix == '.pyi' + try: + relative_path = path.relative_to(TYPESHED_PATH) + except ValueError: + pass + else: + # /[...]/stdlib/3/os/__init__.pyi -> stdlib/3/os/__init__ + rest = relative_path.with_suffix('') + # Remove the stdlib/3 or third_party/3.6 part + import_names = rest.parts[2:] + if rest.name == '__init__': + import_names = import_names[:-1] + + if import_names is not None: + actual_value_set = inference_state.import_module(import_names, prefer_stubs=False) + + stub = create_stub_module( + inference_state, grammar, actual_value_set, + module_node, file_io, import_names + ) + inference_state.stub_module_cache[import_names] = stub + return stub + return None diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/helpers.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/helpers.py new file mode 100644 index 000000000..0e344c240 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/helpers.py @@ -0,0 +1,202 @@ +import copy +import sys +import re +import os +from itertools import chain +from contextlib import contextmanager + +from parso.python import tree + + +def is_stdlib_path(path): + # Python standard library paths look like this: + # /usr/lib/python3.9/... + # TODO The implementation below is probably incorrect and not complete. + parts = path.parts + if 'dist-packages' in parts or 'site-packages' in parts: + return False + + base_path = os.path.join(sys.prefix, 'lib', 'python') + return bool(re.match(re.escape(base_path) + r'\d.\d', str(path))) + + +def deep_ast_copy(obj): + """ + Much, much faster than copy.deepcopy, but just for parser tree nodes. + """ + # If it's already in the cache, just return it. 
+ new_obj = copy.copy(obj) + + # Copy children + new_children = [] + for child in obj.children: + if isinstance(child, tree.Leaf): + new_child = copy.copy(child) + new_child.parent = new_obj + else: + new_child = deep_ast_copy(child) + new_child.parent = new_obj + new_children.append(new_child) + new_obj.children = new_children + + return new_obj + + +def infer_call_of_leaf(context, leaf, cut_own_trailer=False): + """ + Creates a "call" node that consist of all ``trailer`` and ``power`` + objects. E.g. if you call it with ``append``:: + + list([]).append(3) or None + + You would get a node with the content ``list([]).append`` back. + + This generates a copy of the original ast node. + + If you're using the leaf, e.g. the bracket `)` it will return ``list([])``. + + We use this function for two purposes. Given an expression ``bar.foo``, + we may want to + - infer the type of ``foo`` to offer completions after foo + - infer the type of ``bar`` to be able to jump to the definition of foo + The option ``cut_own_trailer`` must be set to true for the second purpose. + """ + trailer = leaf.parent + if trailer.type == 'fstring': + from jedi.inference import compiled + return compiled.get_string_value_set(context.inference_state) + + # The leaf may not be the last or first child, because there exist three + # different trailers: `( x )`, `[ x ]` and `.x`. In the first two examples + # we should not match anything more than x. 
+ if trailer.type != 'trailer' or leaf not in (trailer.children[0], trailer.children[-1]): + if leaf == ':': + # Basically happens with foo[:] when the cursor is on the colon + from jedi.inference.base_value import NO_VALUES + return NO_VALUES + if trailer.type == 'atom': + return context.infer_node(trailer) + return context.infer_node(leaf) + + power = trailer.parent + index = power.children.index(trailer) + if cut_own_trailer: + cut = index + else: + cut = index + 1 + + if power.type == 'error_node': + start = index + while True: + start -= 1 + base = power.children[start] + if base.type != 'trailer': + break + trailers = power.children[start + 1:cut] + else: + base = power.children[0] + trailers = power.children[1:cut] + + if base == 'await': + base = trailers[0] + trailers = trailers[1:] + + values = context.infer_node(base) + from jedi.inference.syntax_tree import infer_trailer + for trailer in trailers: + values = infer_trailer(context, values, trailer) + return values + + +def get_names_of_node(node): + try: + children = node.children + except AttributeError: + if node.type == 'name': + return [node] + else: + return [] + else: + return list(chain.from_iterable(get_names_of_node(c) for c in children)) + + +def is_string(value): + return value.is_compiled() and isinstance(value.get_safe_value(default=None), str) + + +def is_literal(value): + return is_number(value) or is_string(value) + + +def _get_safe_value_or_none(value, accept): + value = value.get_safe_value(default=None) + if isinstance(value, accept): + return value + + +def get_int_or_none(value): + return _get_safe_value_or_none(value, int) + + +def get_str_or_none(value): + return _get_safe_value_or_none(value, str) + + +def is_number(value): + return _get_safe_value_or_none(value, (int, float)) is not None + + +class SimpleGetItemNotFound(Exception): + pass + + +@contextmanager +def reraise_getitem_errors(*exception_classes): + try: + yield + except exception_classes as e: + raise 
SimpleGetItemNotFound(e) + + +def parse_dotted_names(nodes, is_import_from, until_node=None): + level = 0 + names = [] + for node in nodes[1:]: + if node in ('.', '...'): + if not names: + level += len(node.value) + elif node.type == 'dotted_name': + for n in node.children[::2]: + names.append(n) + if n is until_node: + break + else: + continue + break + elif node.type == 'name': + names.append(node) + if node is until_node: + break + elif node == ',': + if not is_import_from: + names = [] + else: + # Here if the keyword `import` comes along it stops checking + # for names. + break + return level, names + + +def values_from_qualified_names(inference_state, *names): + return inference_state.import_module(names[:-1]).py__getattribute__(names[-1]) + + +def is_big_annoying_library(context): + string_names = context.get_root_context().string_names + if string_names is None: + return False + + # Especially pandas and tensorflow are huge complicated Python libraries + # that get even slower than they already are when Jedi tries to undrstand + # dynamic features like decorators, ifs and other stuff. + return string_names[0] in ('pandas', 'numpy', 'tensorflow', 'matplotlib') diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/imports.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/imports.py new file mode 100644 index 000000000..c1a4953fb --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/imports.py @@ -0,0 +1,592 @@ +""" +:mod:`jedi.inference.imports` is here to resolve import statements and return +the modules/classes/functions/whatever, which they stand for. However there's +not any actual importing done. This module is about finding modules in the +filesystem. This can be quite tricky sometimes, because Python imports are not +always that simple. + +This module also supports import autocompletion, which means to complete +statements like ``from datetim`` (cursor at the end would return ``datetime``). 
+""" +import os +from pathlib import Path + +from parso.python import tree +from parso.tree import search_ancestor + +from jedi import debug +from jedi import settings +from jedi.file_io import FolderIO +from jedi.parser_utils import get_cached_code_lines +from jedi.inference import sys_path +from jedi.inference import helpers +from jedi.inference import compiled +from jedi.inference import analysis +from jedi.inference.utils import unite +from jedi.inference.cache import inference_state_method_cache +from jedi.inference.names import ImportName, SubModuleName +from jedi.inference.base_value import ValueSet, NO_VALUES +from jedi.inference.gradual.typeshed import import_module_decorator, \ + create_stub_module, parse_stub_module +from jedi.inference.compiled.subprocess.functions import ImplicitNSInfo +from jedi.plugins import plugin_manager + + +class ModuleCache: + def __init__(self): + self._name_cache = {} + + def add(self, string_names, value_set): + if string_names is not None: + self._name_cache[string_names] = value_set + + def get(self, string_names): + return self._name_cache.get(string_names) + + +# This memoization is needed, because otherwise we will infinitely loop on +# certain imports. 
+@inference_state_method_cache(default=NO_VALUES) +def infer_import(context, tree_name): + module_context = context.get_root_context() + from_import_name, import_path, level, values = \ + _prepare_infer_import(module_context, tree_name) + if values: + + if from_import_name is not None: + values = values.py__getattribute__( + from_import_name, + name_context=context, + analysis_errors=False + ) + + if not values: + path = import_path + (from_import_name,) + importer = Importer(context.inference_state, path, module_context, level) + values = importer.follow() + debug.dbg('after import: %s', values) + return values + + +@inference_state_method_cache(default=[]) +def goto_import(context, tree_name): + module_context = context.get_root_context() + from_import_name, import_path, level, values = \ + _prepare_infer_import(module_context, tree_name) + if not values: + return [] + + if from_import_name is not None: + names = unite([ + c.goto( + from_import_name, + name_context=context, + analysis_errors=False + ) for c in values + ]) + # Avoid recursion on the same names. + if names and not any(n.tree_name is tree_name for n in names): + return names + + path = import_path + (from_import_name,) + importer = Importer(context.inference_state, path, module_context, level) + values = importer.follow() + return set(s.name for s in values) + + +def _prepare_infer_import(module_context, tree_name): + import_node = search_ancestor(tree_name, 'import_name', 'import_from') + import_path = import_node.get_path_for_name(tree_name) + from_import_name = None + try: + from_names = import_node.get_from_names() + except AttributeError: + # Is an import_name + pass + else: + if len(from_names) + 1 == len(import_path): + # We have to fetch the from_names part first and then check + # if from_names exists in the modules. 
+ from_import_name = import_path[-1] + import_path = from_names + + importer = Importer(module_context.inference_state, tuple(import_path), + module_context, import_node.level) + + return from_import_name, tuple(import_path), import_node.level, importer.follow() + + +def _add_error(value, name, message): + if hasattr(name, 'parent') and value is not None: + analysis.add(value, 'import-error', name, message) + else: + debug.warning('ImportError without origin: ' + message) + + +def _level_to_base_import_path(project_path, directory, level): + """ + In case the level is outside of the currently known package (something like + import .....foo), we can still try our best to help the user for + completions. + """ + for i in range(level - 1): + old = directory + directory = os.path.dirname(directory) + if old == directory: + return None, None + + d = directory + level_import_paths = [] + # Now that we are on the level that the user wants to be, calculate the + # import path for it. + while True: + if d == project_path: + return level_import_paths, d + dir_name = os.path.basename(d) + if dir_name: + level_import_paths.insert(0, dir_name) + d = os.path.dirname(d) + else: + return None, directory + + +class Importer: + def __init__(self, inference_state, import_path, module_context, level=0): + """ + An implementation similar to ``__import__``. Use `follow` + to actually follow the imports. + + *level* specifies whether to use absolute or relative imports. 0 (the + default) means only perform absolute imports. Positive values for level + indicate the number of parent directories to search relative to the + directory of the module calling ``__import__()`` (see PEP 328 for the + details). + + :param import_path: List of namespaces (strings or Names). 
+ """ + debug.speed('import %s %s' % (import_path, module_context)) + self._inference_state = inference_state + self.level = level + self._module_context = module_context + + self._fixed_sys_path = None + self._infer_possible = True + if level: + base = module_context.get_value().py__package__() + # We need to care for two cases, the first one is if it's a valid + # Python import. This import has a properly defined module name + # chain like `foo.bar.baz` and an import in baz is made for + # `..lala.` It can then resolve to `foo.bar.lala`. + # The else here is a heuristic for all other cases, if for example + # in `foo` you search for `...bar`, it's obviously out of scope. + # However since Jedi tries to just do it's best, we help the user + # here, because he might have specified something wrong in his + # project. + if level <= len(base): + # Here we basically rewrite the level to 0. + base = tuple(base) + if level > 1: + base = base[:-level + 1] + import_path = base + tuple(import_path) + else: + path = module_context.py__file__() + project_path = self._inference_state.project.path + import_path = list(import_path) + if path is None: + # If no path is defined, our best guess is that the current + # file is edited by a user on the current working + # directory. We need to add an initial path, because it + # will get removed as the name of the current file. + directory = project_path + else: + directory = os.path.dirname(path) + + base_import_path, base_directory = _level_to_base_import_path( + project_path, directory, level, + ) + if base_directory is None: + # Everything is lost, the relative import does point + # somewhere out of the filesystem. + self._infer_possible = False + else: + self._fixed_sys_path = [base_directory] + + if base_import_path is None: + if import_path: + _add_error( + module_context, import_path[0], + message='Attempted relative import beyond top-level package.' 
+ ) + else: + import_path = base_import_path + import_path + self.import_path = import_path + + @property + def _str_import_path(self): + """Returns the import path as pure strings instead of `Name`.""" + return tuple( + name.value if isinstance(name, tree.Name) else name + for name in self.import_path + ) + + def _sys_path_with_modifications(self, is_completion): + if self._fixed_sys_path is not None: + return self._fixed_sys_path + + return ( + # For import completions we don't want to see init paths, but for + # inference we want to show the user as much as possible. + # See GH #1446. + self._inference_state.get_sys_path(add_init_paths=not is_completion) + + [ + str(p) for p + in sys_path.check_sys_path_modifications(self._module_context) + ] + ) + + def follow(self): + if not self.import_path: + if self._fixed_sys_path: + # This is a bit of a special case, that maybe should be + # revisited. If the project path is wrong or the user uses + # relative imports the wrong way, we might end up here, where + # the `fixed_sys_path == project.path` in that case we kind of + # use the project.path.parent directory as our path. This is + # usually not a problem, except if imports in other places are + # using the same names. Example: + # + # foo/ < #1 + # - setup.py + # - foo/ < #2 + # - __init__.py + # - foo.py < #3 + # + # If the top foo is our project folder and somebody uses + # `from . import foo` in `setup.py`, it will resolve to foo #2, + # which means that the import for foo.foo is cached as + # `__init__.py` (#2) and not as `foo.py` (#3). This is usually + # not an issue, because this case is probably pretty rare, but + # might be an issue for some people. + # + # However for most normal cases where we work with different + # file names, this code path hits where we basically change the + # project path to an ancestor of project path. 
+ from jedi.inference.value.namespace import ImplicitNamespaceValue + import_path = (os.path.basename(self._fixed_sys_path[0]),) + ns = ImplicitNamespaceValue( + self._inference_state, + string_names=import_path, + paths=self._fixed_sys_path, + ) + return ValueSet({ns}) + return NO_VALUES + if not self._infer_possible: + return NO_VALUES + + # Check caches first + from_cache = self._inference_state.stub_module_cache.get(self._str_import_path) + if from_cache is not None: + return ValueSet({from_cache}) + from_cache = self._inference_state.module_cache.get(self._str_import_path) + if from_cache is not None: + return from_cache + + sys_path = self._sys_path_with_modifications(is_completion=False) + + return import_module_by_names( + self._inference_state, self.import_path, sys_path, self._module_context + ) + + def _get_module_names(self, search_path=None, in_module=None): + """ + Get the names of all modules in the search_path. This means file names + and not names defined in the files. + """ + if search_path is None: + sys_path = self._sys_path_with_modifications(is_completion=True) + else: + sys_path = search_path + return list(iter_module_names( + self._inference_state, self._module_context, sys_path, + module_cls=ImportName if in_module is None else SubModuleName, + add_builtin_modules=search_path is None and in_module is None, + )) + + def completion_names(self, inference_state, only_modules=False): + """ + :param only_modules: Indicates wheter it's possible to import a + definition that is not defined in a module. 
+ """ + if not self._infer_possible: + return [] + + names = [] + if self.import_path: + # flask + if self._str_import_path == ('flask', 'ext'): + # List Flask extensions like ``flask_foo`` + for mod in self._get_module_names(): + modname = mod.string_name + if modname.startswith('flask_'): + extname = modname[len('flask_'):] + names.append(ImportName(self._module_context, extname)) + # Now the old style: ``flaskext.foo`` + for dir in self._sys_path_with_modifications(is_completion=True): + flaskext = os.path.join(dir, 'flaskext') + if os.path.isdir(flaskext): + names += self._get_module_names([flaskext]) + + values = self.follow() + for value in values: + # Non-modules are not completable. + if value.api_type not in ('module', 'namespace'): # not a module + continue + if not value.is_compiled(): + # sub_modules_dict is not implemented for compiled modules. + names += value.sub_modules_dict().values() + + if not only_modules: + from jedi.inference.gradual.conversion import convert_values + + both_values = values | convert_values(values) + for c in both_values: + for filter in c.get_filters(): + names += filter.values() + else: + if self.level: + # We only get here if the level cannot be properly calculated. + names += self._get_module_names(self._fixed_sys_path) + else: + # This is just the list of global imports. 
+ names += self._get_module_names() + return names + + +def import_module_by_names(inference_state, import_names, sys_path=None, + module_context=None, prefer_stubs=True): + if sys_path is None: + sys_path = inference_state.get_sys_path() + + str_import_names = tuple( + i.value if isinstance(i, tree.Name) else i + for i in import_names + ) + value_set = [None] + for i, name in enumerate(import_names): + value_set = ValueSet.from_sets([ + import_module( + inference_state, + str_import_names[:i+1], + parent_module_value, + sys_path, + prefer_stubs=prefer_stubs, + ) for parent_module_value in value_set + ]) + if not value_set: + message = 'No module named ' + '.'.join(str_import_names) + if module_context is not None: + _add_error(module_context, name, message) + else: + debug.warning(message) + return NO_VALUES + return value_set + + +@plugin_manager.decorate() +@import_module_decorator +def import_module(inference_state, import_names, parent_module_value, sys_path): + """ + This method is very similar to importlib's `_gcd_import`. + """ + if import_names[0] in settings.auto_import_modules: + module = _load_builtin_module(inference_state, import_names, sys_path) + if module is None: + return NO_VALUES + return ValueSet([module]) + + module_name = '.'.join(import_names) + if parent_module_value is None: + # Override the sys.path. It works only good that way. + # Injecting the path directly into `find_module` did not work. + file_io_or_ns, is_pkg = inference_state.compiled_subprocess.get_module_info( + string=import_names[-1], + full_name=module_name, + sys_path=sys_path, + is_global_search=True, + ) + if is_pkg is None: + return NO_VALUES + else: + paths = parent_module_value.py__path__() + if paths is None: + # The module might not be a package. 
+ return NO_VALUES + + file_io_or_ns, is_pkg = inference_state.compiled_subprocess.get_module_info( + string=import_names[-1], + path=paths, + full_name=module_name, + is_global_search=False, + ) + if is_pkg is None: + return NO_VALUES + + if isinstance(file_io_or_ns, ImplicitNSInfo): + from jedi.inference.value.namespace import ImplicitNamespaceValue + module = ImplicitNamespaceValue( + inference_state, + string_names=tuple(file_io_or_ns.name.split('.')), + paths=file_io_or_ns.paths, + ) + elif file_io_or_ns is None: + module = _load_builtin_module(inference_state, import_names, sys_path) + if module is None: + return NO_VALUES + else: + module = _load_python_module( + inference_state, file_io_or_ns, + import_names=import_names, + is_package=is_pkg, + ) + + if parent_module_value is None: + debug.dbg('global search_module %s: %s', import_names[-1], module) + else: + debug.dbg('search_module %s in paths %s: %s', module_name, paths, module) + return ValueSet([module]) + + +def _load_python_module(inference_state, file_io, + import_names=None, is_package=False): + module_node = inference_state.parse( + file_io=file_io, + cache=True, + diff_cache=settings.fast_parser, + cache_path=settings.cache_directory, + ) + + from jedi.inference.value import ModuleValue + return ModuleValue( + inference_state, module_node, + file_io=file_io, + string_names=import_names, + code_lines=get_cached_code_lines(inference_state.grammar, file_io.path), + is_package=is_package, + ) + + +def _load_builtin_module(inference_state, import_names=None, sys_path=None): + project = inference_state.project + if sys_path is None: + sys_path = inference_state.get_sys_path() + if not project._load_unsafe_extensions: + safe_paths = project._get_base_sys_path(inference_state) + sys_path = [p for p in sys_path if p in safe_paths] + + dotted_name = '.'.join(import_names) + assert dotted_name is not None + module = compiled.load_module(inference_state, dotted_name=dotted_name, sys_path=sys_path) + if 
module is None: + # The file might raise an ImportError e.g. and therefore not be + # importable. + return None + return module + + +def load_module_from_path(inference_state, file_io, import_names=None, is_package=None): + """ + This should pretty much only be used for get_modules_containing_name. It's + here to ensure that a random path is still properly loaded into the Jedi + module structure. + """ + path = Path(file_io.path) + if import_names is None: + e_sys_path = inference_state.get_sys_path() + import_names, is_package = sys_path.transform_path_to_dotted(e_sys_path, path) + else: + assert isinstance(is_package, bool) + + is_stub = path.suffix == '.pyi' + if is_stub: + folder_io = file_io.get_parent_folder() + if folder_io.path.endswith('-stubs'): + folder_io = FolderIO(folder_io.path[:-6]) + if path.name == '__init__.pyi': + python_file_io = folder_io.get_file_io('__init__.py') + else: + python_file_io = folder_io.get_file_io(import_names[-1] + '.py') + + try: + v = load_module_from_path( + inference_state, python_file_io, + import_names, is_package=is_package + ) + values = ValueSet([v]) + except FileNotFoundError: + values = NO_VALUES + + return create_stub_module( + inference_state, inference_state.latest_grammar, values, + parse_stub_module(inference_state, file_io), file_io, import_names + ) + else: + module = _load_python_module( + inference_state, file_io, + import_names=import_names, + is_package=is_package, + ) + inference_state.module_cache.add(import_names, ValueSet([module])) + return module + + +def load_namespace_from_path(inference_state, folder_io): + import_names, is_package = sys_path.transform_path_to_dotted( + inference_state.get_sys_path(), + Path(folder_io.path) + ) + from jedi.inference.value.namespace import ImplicitNamespaceValue + return ImplicitNamespaceValue(inference_state, import_names, [folder_io.path]) + + +def follow_error_node_imports_if_possible(context, name): + error_node = tree.search_ancestor(name, 'error_node') + if 
error_node is not None: + # Get the first command start of a started simple_stmt. The error + # node is sometimes a small_stmt and sometimes a simple_stmt. Check + # for ; leaves that start a new statements. + start_index = 0 + for index, n in enumerate(error_node.children): + if n.start_pos > name.start_pos: + break + if n == ';': + start_index = index + 1 + nodes = error_node.children[start_index:] + first_name = nodes[0].get_first_leaf().value + + # Make it possible to infer stuff like `import foo.` or + # `from foo.bar`. + if first_name in ('from', 'import'): + is_import_from = first_name == 'from' + level, names = helpers.parse_dotted_names( + nodes, + is_import_from=is_import_from, + until_node=name, + ) + return Importer( + context.inference_state, names, context.get_root_context(), level).follow() + return None + + +def iter_module_names(inference_state, module_context, search_path, + module_cls=ImportName, add_builtin_modules=True): + """ + Get the names of all modules in the search_path. This means file names + and not names defined in the files. 
+ """ + # add builtin module names + if add_builtin_modules: + for name in inference_state.compiled_subprocess.get_builtin_module_names(): + yield module_cls(module_context, name) + + for name in inference_state.compiled_subprocess.iter_module_names(search_path): + yield module_cls(module_context, name) diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/lazy_value.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/lazy_value.py new file mode 100644 index 000000000..b149f21e6 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/lazy_value.py @@ -0,0 +1,61 @@ +from jedi.inference.base_value import ValueSet, NO_VALUES +from jedi.common import monkeypatch + + +class AbstractLazyValue: + def __init__(self, data, min=1, max=1): + self.data = data + self.min = min + self.max = max + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.data) + + def infer(self): + raise NotImplementedError + + +class LazyKnownValue(AbstractLazyValue): + """data is a Value.""" + def infer(self): + return ValueSet([self.data]) + + +class LazyKnownValues(AbstractLazyValue): + """data is a ValueSet.""" + def infer(self): + return self.data + + +class LazyUnknownValue(AbstractLazyValue): + def __init__(self, min=1, max=1): + super().__init__(None, min, max) + + def infer(self): + return NO_VALUES + + +class LazyTreeValue(AbstractLazyValue): + def __init__(self, context, node, min=1, max=1): + super().__init__(node, min, max) + self.context = context + # We need to save the predefined names. It's an unfortunate side effect + # that needs to be tracked otherwise results will be wrong. 
+ self._predefined_names = dict(context.predefined_names) + + def infer(self): + with monkeypatch(self.context, 'predefined_names', self._predefined_names): + return self.context.infer_node(self.data) + + +def get_merged_lazy_value(lazy_values): + if len(lazy_values) > 1: + return MergedLazyValues(lazy_values) + else: + return lazy_values[0] + + +class MergedLazyValues(AbstractLazyValue): + """data is a list of lazy values.""" + def infer(self): + return ValueSet.from_sets(l.infer() for l in self.data) diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/names.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/names.py new file mode 100644 index 000000000..f446deb98 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/names.py @@ -0,0 +1,677 @@ +from abc import abstractmethod +from inspect import Parameter +from typing import Optional, Tuple + +from parso.tree import search_ancestor + +from jedi.parser_utils import find_statement_documentation, clean_scope_docstring +from jedi.inference.utils import unite +from jedi.inference.base_value import ValueSet, NO_VALUES +from jedi.inference.cache import inference_state_method_cache +from jedi.inference import docstrings +from jedi.cache import memoize_method +from jedi.inference.helpers import deep_ast_copy, infer_call_of_leaf +from jedi.plugins import plugin_manager + + +def _merge_name_docs(names): + doc = '' + for name in names: + if doc: + # In case we have multiple values, just return all of them + # separated by a few dashes. + doc += '\n' + '-' * 30 + '\n' + doc += name.py__doc__() + return doc + + +class AbstractNameDefinition: + start_pos: Optional[Tuple[int, int]] = None + string_name: str + parent_context = None + tree_name = None + is_value_name = True + """ + Used for the Jedi API to know if it's a keyword or an actual name. 
+ """ + + @abstractmethod + def infer(self): + raise NotImplementedError + + @abstractmethod + def goto(self): + # Typically names are already definitions and therefore a goto on that + # name will always result on itself. + return {self} + + def get_qualified_names(self, include_module_names=False): + qualified_names = self._get_qualified_names() + if qualified_names is None or not include_module_names: + return qualified_names + + module_names = self.get_root_context().string_names + if module_names is None: + return None + return module_names + qualified_names + + def _get_qualified_names(self): + # By default, a name has no qualified names. + return None + + def get_root_context(self): + return self.parent_context.get_root_context() + + def get_public_name(self): + return self.string_name + + def __repr__(self): + if self.start_pos is None: + return '<%s: string_name=%s>' % (self.__class__.__name__, self.string_name) + return '<%s: string_name=%s start_pos=%s>' % (self.__class__.__name__, + self.string_name, self.start_pos) + + def is_import(self): + return False + + def py__doc__(self): + return '' + + @property + def api_type(self): + return self.parent_context.api_type + + def get_defining_qualified_value(self): + """ + Returns either None or the value that is public and qualified. Won't + return a function, because a name in a function is never public. + """ + return None + + +class AbstractArbitraryName(AbstractNameDefinition): + """ + When you e.g. want to complete dicts keys, you probably want to complete + string literals, which is not really a name, but for Jedi we use this + concept of Name for completions as well. 
+ """ + is_value_name = False + + def __init__(self, inference_state, string): + self.inference_state = inference_state + self.string_name = string + self.parent_context = inference_state.builtins_module + + def infer(self): + return NO_VALUES + + +class AbstractTreeName(AbstractNameDefinition): + def __init__(self, parent_context, tree_name): + self.parent_context = parent_context + self.tree_name = tree_name + + def get_qualified_names(self, include_module_names=False): + import_node = search_ancestor(self.tree_name, 'import_name', 'import_from') + # For import nodes we cannot just have names, because it's very unclear + # how they would look like. For now we just ignore them in most cases. + # In case of level == 1, it works always, because it's like a submodule + # lookup. + if import_node is not None and not (import_node.level == 1 + and self.get_root_context().get_value().is_package()): + # TODO improve the situation for when level is present. + if include_module_names and not import_node.level: + return tuple(n.value for n in import_node.get_path_for_name(self.tree_name)) + else: + return None + + return super().get_qualified_names(include_module_names) + + def _get_qualified_names(self): + parent_names = self.parent_context.get_qualified_names() + if parent_names is None: + return None + return parent_names + (self.tree_name.value,) + + def get_defining_qualified_value(self): + if self.is_import(): + raise NotImplementedError("Shouldn't really happen, please report") + elif self.parent_context: + return self.parent_context.get_value() # Might be None + return None + + def goto(self): + context = self.parent_context + name = self.tree_name + definition = name.get_definition(import_name_always=True) + if definition is not None: + type_ = definition.type + if type_ == 'expr_stmt': + # Only take the parent, because if it's more complicated than just + # a name it's something you can "goto" again. 
+ is_simple_name = name.parent.type not in ('power', 'trailer') + if is_simple_name: + return [self] + elif type_ in ('import_from', 'import_name'): + from jedi.inference.imports import goto_import + module_names = goto_import(context, name) + return module_names + else: + return [self] + else: + from jedi.inference.imports import follow_error_node_imports_if_possible + values = follow_error_node_imports_if_possible(context, name) + if values is not None: + return [value.name for value in values] + + par = name.parent + node_type = par.type + if node_type == 'argument' and par.children[1] == '=' and par.children[0] == name: + # Named param goto. + trailer = par.parent + if trailer.type == 'arglist': + trailer = trailer.parent + if trailer.type != 'classdef': + if trailer.type == 'decorator': + value_set = context.infer_node(trailer.children[1]) + else: + i = trailer.parent.children.index(trailer) + to_infer = trailer.parent.children[:i] + if to_infer[0] == 'await': + to_infer.pop(0) + value_set = context.infer_node(to_infer[0]) + from jedi.inference.syntax_tree import infer_trailer + for trailer in to_infer[1:]: + value_set = infer_trailer(context, value_set, trailer) + param_names = [] + for value in value_set: + for signature in value.get_signatures(): + for param_name in signature.get_param_names(): + if param_name.string_name == name.value: + param_names.append(param_name) + return param_names + elif node_type == 'dotted_name': # Is a decorator. 
+ index = par.children.index(name) + if index > 0: + new_dotted = deep_ast_copy(par) + new_dotted.children[index - 1:] = [] + values = context.infer_node(new_dotted) + return unite( + value.goto(name, name_context=context) + for value in values + ) + + if node_type == 'trailer' and par.children[0] == '.': + values = infer_call_of_leaf(context, name, cut_own_trailer=True) + return values.goto(name, name_context=context) + else: + stmt = search_ancestor( + name, 'expr_stmt', 'lambdef' + ) or name + if stmt.type == 'lambdef': + stmt = name + return context.goto(name, position=stmt.start_pos) + + def is_import(self): + imp = search_ancestor(self.tree_name, 'import_from', 'import_name') + return imp is not None + + @property + def string_name(self): + return self.tree_name.value + + @property + def start_pos(self): + return self.tree_name.start_pos + + +class ValueNameMixin: + def infer(self): + return ValueSet([self._value]) + + def py__doc__(self): + doc = self._value.py__doc__() + if not doc and self._value.is_stub(): + from jedi.inference.gradual.conversion import convert_names + names = convert_names([self], prefer_stub_to_compiled=False) + if self not in names: + return _merge_name_docs(names) + return doc + + def _get_qualified_names(self): + return self._value.get_qualified_names() + + def get_root_context(self): + if self.parent_context is None: # A module + return self._value.as_context() + return super().get_root_context() + + def get_defining_qualified_value(self): + context = self.parent_context + if context.is_module() or context.is_class(): + return self.parent_context.get_value() # Might be None + return None + + @property + def api_type(self): + return self._value.api_type + + +class ValueName(ValueNameMixin, AbstractTreeName): + def __init__(self, value, tree_name): + super().__init__(value.parent_context, tree_name) + self._value = value + + def goto(self): + return ValueSet([self._value.name]) + + +class TreeNameDefinition(AbstractTreeName): + 
_API_TYPES = dict( + import_name='module', + import_from='module', + funcdef='function', + param='param', + classdef='class', + ) + + def infer(self): + # Refactor this, should probably be here. + from jedi.inference.syntax_tree import tree_name_to_values + return tree_name_to_values( + self.parent_context.inference_state, + self.parent_context, + self.tree_name + ) + + @property + def api_type(self): + definition = self.tree_name.get_definition(import_name_always=True) + if definition is None: + return 'statement' + return self._API_TYPES.get(definition.type, 'statement') + + def assignment_indexes(self): + """ + Returns an array of tuple(int, node) of the indexes that are used in + tuple assignments. + + For example if the name is ``y`` in the following code:: + + x, (y, z) = 2, '' + + would result in ``[(1, xyz_node), (0, yz_node)]``. + + When searching for b in the case ``a, *b, c = [...]`` it will return:: + + [(slice(1, -1), abc_node)] + """ + indexes = [] + is_star_expr = False + node = self.tree_name.parent + compare = self.tree_name + while node is not None: + if node.type in ('testlist', 'testlist_comp', 'testlist_star_expr', 'exprlist'): + for i, child in enumerate(node.children): + if child == compare: + index = int(i / 2) + if is_star_expr: + from_end = int((len(node.children) - i) / 2) + index = slice(index, -from_end) + indexes.insert(0, (index, node)) + break + else: + raise LookupError("Couldn't find the assignment.") + is_star_expr = False + elif node.type == 'star_expr': + is_star_expr = True + elif node.type in ('expr_stmt', 'sync_comp_for'): + break + + compare = node + node = node.parent + return indexes + + @property + def inference_state(self): + # Used by the cache function below + return self.parent_context.inference_state + + @inference_state_method_cache(default='') + def py__doc__(self): + api_type = self.api_type + if api_type in ('function', 'class', 'property'): + if self.parent_context.get_root_context().is_stub(): + from 
jedi.inference.gradual.conversion import convert_names + names = convert_names([self], prefer_stub_to_compiled=False) + if self not in names: + return _merge_name_docs(names) + + # Make sure the names are not TreeNameDefinitions anymore. + return clean_scope_docstring(self.tree_name.get_definition()) + + if api_type == 'module': + names = self.goto() + if self not in names: + return _merge_name_docs(names) + + if api_type == 'statement' and self.tree_name.is_definition(): + return find_statement_documentation(self.tree_name.get_definition()) + return '' + + +class _ParamMixin: + def maybe_positional_argument(self, include_star=True): + options = [Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD] + if include_star: + options.append(Parameter.VAR_POSITIONAL) + return self.get_kind() in options + + def maybe_keyword_argument(self, include_stars=True): + options = [Parameter.KEYWORD_ONLY, Parameter.POSITIONAL_OR_KEYWORD] + if include_stars: + options.append(Parameter.VAR_KEYWORD) + return self.get_kind() in options + + def _kind_string(self): + kind = self.get_kind() + if kind == Parameter.VAR_POSITIONAL: # *args + return '*' + if kind == Parameter.VAR_KEYWORD: # **kwargs + return '**' + return '' + + def get_qualified_names(self, include_module_names=False): + return None + + +class ParamNameInterface(_ParamMixin): + api_type = 'param' + + def get_kind(self): + raise NotImplementedError + + def to_string(self): + raise NotImplementedError + + def get_executed_param_name(self): + """ + For dealing with type inference and working around the graph, we + sometimes want to have the param name of the execution. This feels a + bit strange and we might have to refactor at some point. + + For now however it exists to avoid infering params when we don't really + need them (e.g. when we can just instead use annotations. 
+ """ + return None + + @property + def star_count(self): + kind = self.get_kind() + if kind == Parameter.VAR_POSITIONAL: + return 1 + if kind == Parameter.VAR_KEYWORD: + return 2 + return 0 + + def infer_default(self): + return NO_VALUES + + +class BaseTreeParamName(ParamNameInterface, AbstractTreeName): + annotation_node = None + default_node = None + + def to_string(self): + output = self._kind_string() + self.get_public_name() + annotation = self.annotation_node + default = self.default_node + if annotation is not None: + output += ': ' + annotation.get_code(include_prefix=False) + if default is not None: + output += '=' + default.get_code(include_prefix=False) + return output + + def get_public_name(self): + name = self.string_name + if name.startswith('__'): + # Params starting with __ are an equivalent to positional only + # variables in typeshed. + name = name[2:] + return name + + def goto(self, **kwargs): + return [self] + + +class _ActualTreeParamName(BaseTreeParamName): + def __init__(self, function_value, tree_name): + super().__init__( + function_value.get_default_param_context(), tree_name) + self.function_value = function_value + + def _get_param_node(self): + return search_ancestor(self.tree_name, 'param') + + @property + def annotation_node(self): + return self._get_param_node().annotation + + def infer_annotation(self, execute_annotation=True, ignore_stars=False): + from jedi.inference.gradual.annotation import infer_param + values = infer_param( + self.function_value, self._get_param_node(), + ignore_stars=ignore_stars) + if execute_annotation: + values = values.execute_annotation() + return values + + def infer_default(self): + node = self.default_node + if node is None: + return NO_VALUES + return self.parent_context.infer_node(node) + + @property + def default_node(self): + return self._get_param_node().default + + def get_kind(self): + tree_param = self._get_param_node() + if tree_param.star_count == 1: # *args + return 
Parameter.VAR_POSITIONAL + if tree_param.star_count == 2: # **kwargs + return Parameter.VAR_KEYWORD + + # Params starting with __ are an equivalent to positional only + # variables in typeshed. + if tree_param.name.value.startswith('__'): + return Parameter.POSITIONAL_ONLY + + parent = tree_param.parent + param_appeared = False + for p in parent.children: + if param_appeared: + if p == '/': + return Parameter.POSITIONAL_ONLY + else: + if p == '*': + return Parameter.KEYWORD_ONLY + if p.type == 'param': + if p.star_count: + return Parameter.KEYWORD_ONLY + if p == tree_param: + param_appeared = True + return Parameter.POSITIONAL_OR_KEYWORD + + def infer(self): + values = self.infer_annotation() + if values: + return values + + doc_params = docstrings.infer_param(self.function_value, self._get_param_node()) + return doc_params + + +class AnonymousParamName(_ActualTreeParamName): + @plugin_manager.decorate(name='goto_anonymous_param') + def goto(self): + return super().goto() + + @plugin_manager.decorate(name='infer_anonymous_param') + def infer(self): + values = super().infer() + if values: + return values + from jedi.inference.dynamic_params import dynamic_param_lookup + param = self._get_param_node() + values = dynamic_param_lookup(self.function_value, param.position_index) + if values: + return values + + if param.star_count == 1: + from jedi.inference.value.iterable import FakeTuple + value = FakeTuple(self.function_value.inference_state, []) + elif param.star_count == 2: + from jedi.inference.value.iterable import FakeDict + value = FakeDict(self.function_value.inference_state, {}) + elif param.default is None: + return NO_VALUES + else: + return self.function_value.parent_context.infer_node(param.default) + return ValueSet({value}) + + +class ParamName(_ActualTreeParamName): + def __init__(self, function_value, tree_name, arguments): + super().__init__(function_value, tree_name) + self.arguments = arguments + + def infer(self): + values = super().infer() + if 
values: + return values + + return self.get_executed_param_name().infer() + + def get_executed_param_name(self): + from jedi.inference.param import get_executed_param_names + params_names = get_executed_param_names(self.function_value, self.arguments) + return params_names[self._get_param_node().position_index] + + +class ParamNameWrapper(_ParamMixin): + def __init__(self, param_name): + self._wrapped_param_name = param_name + + def __getattr__(self, name): + return getattr(self._wrapped_param_name, name) + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self._wrapped_param_name) + + +class ImportName(AbstractNameDefinition): + start_pos = (1, 0) + _level = 0 + + def __init__(self, parent_context, string_name): + self._from_module_context = parent_context + self.string_name = string_name + + def get_qualified_names(self, include_module_names=False): + if include_module_names: + if self._level: + assert self._level == 1, "Everything else is not supported for now" + module_names = self._from_module_context.string_names + if module_names is None: + return module_names + return module_names + (self.string_name,) + return (self.string_name,) + return () + + @property + def parent_context(self): + m = self._from_module_context + import_values = self.infer() + if not import_values: + return m + # It's almost always possible to find the import or to not find it. The + # importing returns only one value, pretty much always. 
+ return next(iter(import_values)).as_context() + + @memoize_method + def infer(self): + from jedi.inference.imports import Importer + m = self._from_module_context + return Importer(m.inference_state, [self.string_name], m, level=self._level).follow() + + def goto(self): + return [m.name for m in self.infer()] + + @property + def api_type(self): + return 'module' + + def py__doc__(self): + return _merge_name_docs(self.goto()) + + +class SubModuleName(ImportName): + _level = 1 + + +class NameWrapper: + def __init__(self, wrapped_name): + self._wrapped_name = wrapped_name + + def __getattr__(self, name): + return getattr(self._wrapped_name, name) + + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, self._wrapped_name) + + +class StubNameMixin: + def py__doc__(self): + from jedi.inference.gradual.conversion import convert_names + # Stubs are not complicated and we can just follow simple statements + # that have an equals in them, because they typically make something + # else public. See e.g. stubs for `requests`. + names = [self] + if self.api_type == 'statement' and '=' in self.tree_name.get_definition().children: + names = [v.name for v in self.infer()] + + names = convert_names(names, prefer_stub_to_compiled=False) + if self in names: + return super().py__doc__() + else: + # We have signatures ourselves in stubs, so don't use signatures + # from the implementation. + return _merge_name_docs(names) + + +# From here on down we make looking up the sys.version_info fast. 
+class StubName(StubNameMixin, TreeNameDefinition): + def infer(self): + inferred = super().infer() + if self.string_name == 'version_info' and self.get_root_context().py__name__() == 'sys': + from jedi.inference.gradual.stub_value import VersionInfo + return ValueSet(VersionInfo(c) for c in inferred) + return inferred + + +class ModuleName(ValueNameMixin, AbstractNameDefinition): + start_pos = 1, 0 + + def __init__(self, value, name): + self._value = value + self._name = name + + @property + def string_name(self): + return self._name + + +class StubModuleName(StubNameMixin, ModuleName): + pass diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/param.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/param.py new file mode 100644 index 000000000..1f296215d --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/param.py @@ -0,0 +1,257 @@ +from collections import defaultdict +from inspect import Parameter + +from jedi import debug +from jedi.inference.utils import PushBackIterator +from jedi.inference import analysis +from jedi.inference.lazy_value import LazyKnownValue, \ + LazyTreeValue, LazyUnknownValue +from jedi.inference.value import iterable +from jedi.inference.names import ParamName + + +def _add_argument_issue(error_name, lazy_value, message): + if isinstance(lazy_value, LazyTreeValue): + node = lazy_value.data + if node.parent.type == 'argument': + node = node.parent + return analysis.add(lazy_value.context, error_name, node, message) + + +class ExecutedParamName(ParamName): + def __init__(self, function_value, arguments, param_node, lazy_value, is_default=False): + super().__init__(function_value, param_node.name, arguments=arguments) + self._lazy_value = lazy_value + self._is_default = is_default + + def infer(self): + return self._lazy_value.infer() + + def matches_signature(self): + if self._is_default: + return True + argument_values = self.infer().py__class__() + if self.get_kind() in (Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD): + 
return True + annotations = self.infer_annotation(execute_annotation=False) + if not annotations: + # If we cannot infer annotations - or there aren't any - pretend + # that the signature matches. + return True + matches = any(c1.is_sub_class_of(c2) + for c1 in argument_values + for c2 in annotations.gather_annotation_classes()) + debug.dbg("param compare %s: %s <=> %s", + matches, argument_values, annotations, color='BLUE') + return matches + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.string_name) + + +def get_executed_param_names_and_issues(function_value, arguments): + """ + Return a tuple of: + - a list of `ExecutedParamName`s corresponding to the arguments of the + function execution `function_value`, containing the inferred value of + those arguments (whether explicit or default) + - a list of the issues encountered while building that list + + For example, given: + ``` + def foo(a, b, c=None, d='d'): ... + + foo(42, c='c') + ``` + + Then for the execution of `foo`, this will return a tuple containing: + - a list with entries for each parameter a, b, c & d; the entries for a, + c, & d will have their values (42, 'c' and 'd' respectively) included. + - a list with a single entry about the lack of a value for `b` + """ + def too_many_args(argument): + m = _error_argument_count(funcdef, len(unpacked_va)) + # Just report an error for the first param that is not needed (like + # cPython). + if arguments.get_calling_nodes(): + # There might not be a valid calling node so check for that first. + issues.append( + _add_argument_issue( + 'type-error-too-many-arguments', + argument, + message=m + ) + ) + else: + issues.append(None) + debug.warning('non-public warning: %s', m) + + issues = [] # List[Optional[analysis issue]] + result_params = [] + param_dict = {} + funcdef = function_value.tree_node + # Default params are part of the value where the function was defined. 
+ # This means that they might have access on class variables that the + # function itself doesn't have. + default_param_context = function_value.get_default_param_context() + + for param in funcdef.get_params(): + param_dict[param.name.value] = param + unpacked_va = list(arguments.unpack(funcdef)) + var_arg_iterator = PushBackIterator(iter(unpacked_va)) + + non_matching_keys = defaultdict(lambda: []) + keys_used = {} + keys_only = False + had_multiple_value_error = False + for param in funcdef.get_params(): + # The value and key can both be null. There, the defaults apply. + # args / kwargs will just be empty arrays / dicts, respectively. + # Wrong value count is just ignored. If you try to test cases that are + # not allowed in Python, Jedi will maybe not show any completions. + is_default = False + key, argument = next(var_arg_iterator, (None, None)) + while key is not None: + keys_only = True + try: + key_param = param_dict[key] + except KeyError: + non_matching_keys[key] = argument + else: + if key in keys_used: + had_multiple_value_error = True + m = ("TypeError: %s() got multiple values for keyword argument '%s'." + % (funcdef.name, key)) + for contextualized_node in arguments.get_calling_nodes(): + issues.append( + analysis.add(contextualized_node.context, + 'type-error-multiple-values', + contextualized_node.node, message=m) + ) + else: + keys_used[key] = ExecutedParamName( + function_value, arguments, key_param, argument) + key, argument = next(var_arg_iterator, (None, None)) + + try: + result_params.append(keys_used[param.name.value]) + continue + except KeyError: + pass + + if param.star_count == 1: + # *args param + lazy_value_list = [] + if argument is not None: + lazy_value_list.append(argument) + for key, argument in var_arg_iterator: + # Iterate until a key argument is found. 
+ if key: + var_arg_iterator.push_back((key, argument)) + break + lazy_value_list.append(argument) + seq = iterable.FakeTuple(function_value.inference_state, lazy_value_list) + result_arg = LazyKnownValue(seq) + elif param.star_count == 2: + if argument is not None: + too_many_args(argument) + # **kwargs param + dct = iterable.FakeDict(function_value.inference_state, dict(non_matching_keys)) + result_arg = LazyKnownValue(dct) + non_matching_keys = {} + else: + # normal param + if argument is None: + # No value: Return an empty container + if param.default is None: + result_arg = LazyUnknownValue() + if not keys_only: + for contextualized_node in arguments.get_calling_nodes(): + m = _error_argument_count(funcdef, len(unpacked_va)) + issues.append( + analysis.add( + contextualized_node.context, + 'type-error-too-few-arguments', + contextualized_node.node, + message=m, + ) + ) + else: + result_arg = LazyTreeValue(default_param_context, param.default) + is_default = True + else: + result_arg = argument + + result_params.append(ExecutedParamName( + function_value, arguments, param, result_arg, is_default=is_default + )) + if not isinstance(result_arg, LazyUnknownValue): + keys_used[param.name.value] = result_params[-1] + + if keys_only: + # All arguments should be handed over to the next function. It's not + # about the values inside, it's about the names. Jedi needs to now that + # there's nothing to find for certain names. + for k in set(param_dict) - set(keys_used): + param = param_dict[k] + + if not (non_matching_keys or had_multiple_value_error + or param.star_count or param.default): + # add a warning only if there's not another one. 
+ for contextualized_node in arguments.get_calling_nodes(): + m = _error_argument_count(funcdef, len(unpacked_va)) + issues.append( + analysis.add(contextualized_node.context, + 'type-error-too-few-arguments', + contextualized_node.node, message=m) + ) + + for key, lazy_value in non_matching_keys.items(): + m = "TypeError: %s() got an unexpected keyword argument '%s'." \ + % (funcdef.name, key) + issues.append( + _add_argument_issue( + 'type-error-keyword-argument', + lazy_value, + message=m + ) + ) + + remaining_arguments = list(var_arg_iterator) + if remaining_arguments: + first_key, lazy_value = remaining_arguments[0] + too_many_args(lazy_value) + return result_params, issues + + +def get_executed_param_names(function_value, arguments): + """ + Return a list of `ExecutedParamName`s corresponding to the arguments of the + function execution `function_value`, containing the inferred value of those + arguments (whether explicit or default). Any issues building this list (for + example required arguments which are missing in the invocation) are ignored. + + For example, given: + ``` + def foo(a, b, c=None, d='d'): ... + + foo(42, c='c') + ``` + + Then for the execution of `foo`, this will return a list containing entries + for each parameter a, b, c & d; the entries for a, c, & d will have their + values (42, 'c' and 'd' respectively) included. + """ + return get_executed_param_names_and_issues(function_value, arguments)[0] + + +def _error_argument_count(funcdef, actual_count): + params = funcdef.get_params() + default_arguments = sum(1 for p in params if p.default or p.star_count) + + if default_arguments == 0: + before = 'exactly ' + else: + before = 'from %s to ' % (len(params) - default_arguments) + return ('TypeError: %s() takes %s%s arguments (%s given).' 
+ % (funcdef.name, before, len(params), actual_count)) diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/parser_cache.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/parser_cache.py new file mode 100644 index 000000000..c9b9b2bd6 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/parser_cache.py @@ -0,0 +1,6 @@ +from jedi.inference.cache import inference_state_function_cache + + +@inference_state_function_cache() +def get_yield_exprs(inference_state, funcdef): + return list(funcdef.iter_yield_exprs()) diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/recursion.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/recursion.py new file mode 100644 index 000000000..cc2418738 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/recursion.py @@ -0,0 +1,153 @@ +""" +Recursions are the recipe of |jedi| to conquer Python code. However, someone +must stop recursions going mad. Some settings are here to make |jedi| stop at +the right time. You can read more about them :ref:`here `. + +Next to the internal ``jedi.inference.cache`` this module also makes |jedi| not +thread-safe, because ``execution_recursion_decorator`` uses class variables to +count the function calls. + +.. _settings-recursion: + +Settings +~~~~~~~~~~ + +Recursion settings are important if you don't want extremely +recursive python code to go absolutely crazy. + +The default values are based on experiments while completing the |jedi| library +itself (inception!). But I don't think there's any other Python library that +uses recursion in a similarly extreme way. Completion should also be fast and +therefore the quality might not always be maximal. + +.. autodata:: recursion_limit +.. autodata:: total_function_execution_limit +.. autodata:: per_function_execution_limit +.. 
autodata:: per_function_recursion_limit +""" + +from contextlib import contextmanager + +from jedi import debug +from jedi.inference.base_value import NO_VALUES + + +recursion_limit = 15 +""" +Like :func:`sys.getrecursionlimit()`, just for |jedi|. +""" +total_function_execution_limit = 200 +""" +This is a hard limit of how many non-builtin functions can be executed. +""" +per_function_execution_limit = 6 +""" +The maximal amount of times a specific function may be executed. +""" +per_function_recursion_limit = 2 +""" +A function may not be executed more than this number of times recursively. +""" + + +class RecursionDetector: + def __init__(self): + self.pushed_nodes = [] + + +@contextmanager +def execution_allowed(inference_state, node): + """ + A decorator to detect recursions in statements. In a recursion a statement + at the same place, in the same module may not be executed two times. + """ + pushed_nodes = inference_state.recursion_detector.pushed_nodes + + if node in pushed_nodes: + debug.warning('catched stmt recursion: %s @%s', node, + getattr(node, 'start_pos', None)) + yield False + else: + try: + pushed_nodes.append(node) + yield True + finally: + pushed_nodes.pop() + + +def execution_recursion_decorator(default=NO_VALUES): + def decorator(func): + def wrapper(self, **kwargs): + detector = self.inference_state.execution_recursion_detector + limit_reached = detector.push_execution(self) + try: + if limit_reached: + result = default + else: + result = func(self, **kwargs) + finally: + detector.pop_execution() + return result + return wrapper + return decorator + + +class ExecutionRecursionDetector: + """ + Catches recursions of executions. 
+ """ + def __init__(self, inference_state): + self._inference_state = inference_state + + self._recursion_level = 0 + self._parent_execution_funcs = [] + self._funcdef_execution_counts = {} + self._execution_count = 0 + + def pop_execution(self): + self._parent_execution_funcs.pop() + self._recursion_level -= 1 + + def push_execution(self, execution): + funcdef = execution.tree_node + + # These two will be undone in pop_execution. + self._recursion_level += 1 + self._parent_execution_funcs.append(funcdef) + + module_context = execution.get_root_context() + + if module_context.is_builtins_module(): + # We have control over builtins so we know they are not recursing + # like crazy. Therefore we just let them execute always, because + # they usually just help a lot with getting good results. + return False + + if self._recursion_level > recursion_limit: + debug.warning('Recursion limit (%s) reached', recursion_limit) + return True + + if self._execution_count >= total_function_execution_limit: + debug.warning('Function execution limit (%s) reached', total_function_execution_limit) + return True + self._execution_count += 1 + + if self._funcdef_execution_counts.setdefault(funcdef, 0) >= per_function_execution_limit: + if module_context.py__name__() == 'typing': + return False + debug.warning( + 'Per function execution limit (%s) reached: %s', + per_function_execution_limit, + funcdef + ) + return True + self._funcdef_execution_counts[funcdef] += 1 + + if self._parent_execution_funcs.count(funcdef) > per_function_recursion_limit: + debug.warning( + 'Per function recursion limit (%s) reached: %s', + per_function_recursion_limit, + funcdef + ) + return True + return False diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/references.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/references.py new file mode 100644 index 000000000..6ffa160e2 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/references.py @@ -0,0 +1,304 @@ +import os +import re + +from 
parso import python_bytes_to_unicode + +from jedi.debug import dbg +from jedi.file_io import KnownContentFileIO, FolderIO +from jedi.inference.names import SubModuleName +from jedi.inference.imports import load_module_from_path +from jedi.inference.filters import ParserTreeFilter +from jedi.inference.gradual.conversion import convert_names + +_IGNORE_FOLDERS = ('.tox', '.venv', '.mypy_cache', 'venv', '__pycache__') + +_OPENED_FILE_LIMIT = 2000 +""" +Stats from a 2016 Lenovo Notebook running Linux: +With os.walk, it takes about 10s to scan 11'000 files (without filesystem +caching). Once cached it only takes 5s. So it is expected that reading all +those files might take a few seconds, but not a lot more. +""" +_PARSED_FILE_LIMIT = 30 +""" +For now we keep the amount of parsed files really low, since parsing might take +easily 100ms for bigger files. +""" + + +def _resolve_names(definition_names, avoid_names=()): + for name in definition_names: + if name in avoid_names: + # Avoiding recursions here, because goto on a module name lands + # on the same module. + continue + + if not isinstance(name, SubModuleName): + # SubModuleNames are not actually existing names but created + # names when importing something like `import foo.bar.baz`. + yield name + + if name.api_type == 'module': + yield from _resolve_names(name.goto(), definition_names) + + +def _dictionarize(names): + return dict( + (n if n.tree_name is None else n.tree_name, n) + for n in names + ) + + +def _find_defining_names(module_context, tree_name): + found_names = _find_names(module_context, tree_name) + + for name in list(found_names): + # Convert from/to stubs, because those might also be usages. 
+ found_names |= set(convert_names( + [name], + only_stubs=not name.get_root_context().is_stub(), + prefer_stub_to_compiled=False + )) + + found_names |= set(_find_global_variables(found_names, tree_name.value)) + for name in list(found_names): + if name.api_type == 'param' or name.tree_name is None \ + or name.tree_name.parent.type == 'trailer': + continue + found_names |= set(_add_names_in_same_context(name.parent_context, name.string_name)) + return set(_resolve_names(found_names)) + + +def _find_names(module_context, tree_name): + name = module_context.create_name(tree_name) + found_names = set(name.goto()) + found_names.add(name) + + return set(_resolve_names(found_names)) + + +def _add_names_in_same_context(context, string_name): + if context.tree_node is None: + return + + until_position = None + while True: + filter_ = ParserTreeFilter( + parent_context=context, + until_position=until_position, + ) + names = set(filter_.get(string_name)) + if not names: + break + yield from names + ordered = sorted(names, key=lambda x: x.start_pos) + until_position = ordered[0].start_pos + + +def _find_global_variables(names, search_name): + for name in names: + if name.tree_name is None: + continue + module_context = name.get_root_context() + try: + method = module_context.get_global_filter + except AttributeError: + continue + else: + for global_name in method().get(search_name): + yield global_name + c = module_context.create_context(global_name.tree_name) + yield from _add_names_in_same_context(c, global_name.string_name) + + +def find_references(module_context, tree_name, only_in_module=False): + inf = module_context.inference_state + search_name = tree_name.value + + # We disable flow analysis, because if we have ifs that are only true in + # certain cases, we want both sides. 
+ try: + inf.flow_analysis_enabled = False + found_names = _find_defining_names(module_context, tree_name) + finally: + inf.flow_analysis_enabled = True + + found_names_dct = _dictionarize(found_names) + + module_contexts = [module_context] + if not only_in_module: + for m in set(d.get_root_context() for d in found_names): + if m != module_context and m.tree_node is not None \ + and inf.project.path in m.py__file__().parents: + module_contexts.append(m) + # For param no search for other modules is necessary. + if only_in_module or any(n.api_type == 'param' for n in found_names): + potential_modules = module_contexts + else: + potential_modules = get_module_contexts_containing_name( + inf, + module_contexts, + search_name, + ) + + non_matching_reference_maps = {} + for module_context in potential_modules: + for name_leaf in module_context.tree_node.get_used_names().get(search_name, []): + new = _dictionarize(_find_names(module_context, name_leaf)) + if any(tree_name in found_names_dct for tree_name in new): + found_names_dct.update(new) + for tree_name in new: + for dct in non_matching_reference_maps.get(tree_name, []): + # A reference that was previously searched for matches + # with a now found name. Merge. 
+ found_names_dct.update(dct) + try: + del non_matching_reference_maps[tree_name] + except KeyError: + pass + else: + for name in new: + non_matching_reference_maps.setdefault(name, []).append(new) + result = found_names_dct.values() + if only_in_module: + return [n for n in result if n.get_root_context() == module_context] + return result + + +def _check_fs(inference_state, file_io, regex): + try: + code = file_io.read() + except FileNotFoundError: + return None + code = python_bytes_to_unicode(code, errors='replace') + if not regex.search(code): + return None + new_file_io = KnownContentFileIO(file_io.path, code) + m = load_module_from_path(inference_state, new_file_io) + if m.is_compiled(): + return None + return m.as_context() + + +def gitignored_lines(folder_io, file_io): + ignored_paths = set() + ignored_names = set() + for l in file_io.read().splitlines(): + if not l or l.startswith(b'#'): + continue + + p = l.decode('utf-8', 'ignore') + if p.startswith('/'): + name = p[1:] + if name.endswith(os.path.sep): + name = name[:-1] + ignored_paths.add(os.path.join(folder_io.path, name)) + else: + ignored_names.add(p) + return ignored_paths, ignored_names + + +def recurse_find_python_folders_and_files(folder_io, except_paths=()): + except_paths = set(except_paths) + for root_folder_io, folder_ios, file_ios in folder_io.walk(): + # Delete folders that we don't want to iterate over. 
+ for file_io in file_ios: + path = file_io.path + if path.suffix in ('.py', '.pyi'): + if path not in except_paths: + yield None, file_io + + if path.name == '.gitignore': + ignored_paths, ignored_names = \ + gitignored_lines(root_folder_io, file_io) + except_paths |= ignored_paths + + folder_ios[:] = [ + folder_io + for folder_io in folder_ios + if folder_io.path not in except_paths + and folder_io.get_base_name() not in _IGNORE_FOLDERS + ] + for folder_io in folder_ios: + yield folder_io, None + + +def recurse_find_python_files(folder_io, except_paths=()): + for folder_io, file_io in recurse_find_python_folders_and_files(folder_io, except_paths): + if file_io is not None: + yield file_io + + +def _find_python_files_in_sys_path(inference_state, module_contexts): + sys_path = inference_state.get_sys_path() + except_paths = set() + yielded_paths = [m.py__file__() for m in module_contexts] + for module_context in module_contexts: + file_io = module_context.get_value().file_io + if file_io is None: + continue + + folder_io = file_io.get_parent_folder() + while True: + path = folder_io.path + if not any(path.startswith(p) for p in sys_path) or path in except_paths: + break + for file_io in recurse_find_python_files(folder_io, except_paths): + if file_io.path not in yielded_paths: + yield file_io + except_paths.add(path) + folder_io = folder_io.get_parent_folder() + + +def _find_project_modules(inference_state, module_contexts): + except_ = [m.py__file__() for m in module_contexts] + yield from recurse_find_python_files(FolderIO(inference_state.project.path), except_) + + +def get_module_contexts_containing_name(inference_state, module_contexts, name, + limit_reduction=1): + """ + Search a name in the directories of modules. + + :param limit_reduction: Divides the limits on opening/parsing files by this + factor. 
+ """ + # Skip non python modules + for module_context in module_contexts: + if module_context.is_compiled(): + continue + yield module_context + + # Very short names are not searched in other modules for now to avoid lots + # of file lookups. + if len(name) <= 2: + return + + # Currently not used, because there's only `scope=project` and `scope=file` + # At the moment there is no such thing as `scope=sys.path`. + # file_io_iterator = _find_python_files_in_sys_path(inference_state, module_contexts) + file_io_iterator = _find_project_modules(inference_state, module_contexts) + yield from search_in_file_ios(inference_state, file_io_iterator, name, + limit_reduction=limit_reduction) + + +def search_in_file_ios(inference_state, file_io_iterator, name, + limit_reduction=1, complete=False): + parse_limit = _PARSED_FILE_LIMIT / limit_reduction + open_limit = _OPENED_FILE_LIMIT / limit_reduction + file_io_count = 0 + parsed_file_count = 0 + regex = re.compile(r'\b' + re.escape(name) + (r'' if complete else r'\b')) + for file_io in file_io_iterator: + file_io_count += 1 + m = _check_fs(inference_state, file_io, regex) + if m is not None: + parsed_file_count += 1 + yield m + if parsed_file_count >= parse_limit: + dbg('Hit limit of parsed files: %s', parse_limit) + break + + if file_io_count >= open_limit: + dbg('Hit limit of opened files: %s', open_limit) + break diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/signature.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/signature.py new file mode 100644 index 000000000..565a269b8 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/signature.py @@ -0,0 +1,152 @@ +from inspect import Parameter + +from jedi.cache import memoize_method +from jedi import debug +from jedi import parser_utils + + +class _SignatureMixin: + def to_string(self): + def param_strings(): + is_positional = False + is_kw_only = False + for n in self.get_param_names(resolve_stars=True): + kind = n.get_kind() + is_positional |= kind == 
Parameter.POSITIONAL_ONLY + if is_positional and kind != Parameter.POSITIONAL_ONLY: + yield '/' + is_positional = False + + if kind == Parameter.VAR_POSITIONAL: + is_kw_only = True + elif kind == Parameter.KEYWORD_ONLY and not is_kw_only: + yield '*' + is_kw_only = True + + yield n.to_string() + + if is_positional: + yield '/' + + s = self.name.string_name + '(' + ', '.join(param_strings()) + ')' + annotation = self.annotation_string + if annotation: + s += ' -> ' + annotation + return s + + +class AbstractSignature(_SignatureMixin): + def __init__(self, value, is_bound=False): + self.value = value + self.is_bound = is_bound + + @property + def name(self): + return self.value.name + + @property + def annotation_string(self): + return '' + + def get_param_names(self, resolve_stars=False): + param_names = self._function_value.get_param_names() + if self.is_bound: + return param_names[1:] + return param_names + + def bind(self, value): + raise NotImplementedError + + def matches_signature(self, arguments): + return True + + def __repr__(self): + if self.value is self._function_value: + return '<%s: %s>' % (self.__class__.__name__, self.value) + return '<%s: %s, %s>' % (self.__class__.__name__, self.value, self._function_value) + + +class TreeSignature(AbstractSignature): + def __init__(self, value, function_value=None, is_bound=False): + super().__init__(value, is_bound) + self._function_value = function_value or value + + def bind(self, value): + return TreeSignature(value, self._function_value, is_bound=True) + + @property + def _annotation(self): + # Classes don't need annotations, even if __init__ has one. They always + # return themselves. 
+ if self.value.is_class(): + return None + return self._function_value.tree_node.annotation + + @property + def annotation_string(self): + a = self._annotation + if a is None: + return '' + return a.get_code(include_prefix=False) + + @memoize_method + def get_param_names(self, resolve_stars=False): + params = self._function_value.get_param_names() + if resolve_stars: + from jedi.inference.star_args import process_params + params = process_params(params) + if self.is_bound: + return params[1:] + return params + + def matches_signature(self, arguments): + from jedi.inference.param import get_executed_param_names_and_issues + executed_param_names, issues = \ + get_executed_param_names_and_issues(self._function_value, arguments) + if issues: + return False + + matches = all(executed_param_name.matches_signature() + for executed_param_name in executed_param_names) + if debug.enable_notice: + tree_node = self._function_value.tree_node + signature = parser_utils.get_signature(tree_node) + if matches: + debug.dbg("Overloading match: %s@%s (%s)", + signature, tree_node.start_pos[0], arguments, color='BLUE') + else: + debug.dbg("Overloading no match: %s@%s (%s)", + signature, tree_node.start_pos[0], arguments, color='BLUE') + return matches + + +class BuiltinSignature(AbstractSignature): + def __init__(self, value, return_string, function_value=None, is_bound=False): + super().__init__(value, is_bound) + self._return_string = return_string + self.__function_value = function_value + + @property + def annotation_string(self): + return self._return_string + + @property + def _function_value(self): + if self.__function_value is None: + return self.value + return self.__function_value + + def bind(self, value): + return BuiltinSignature( + value, self._return_string, + function_value=self.value, + is_bound=True + ) + + +class SignatureWrapper(_SignatureMixin): + def __init__(self, wrapped_signature): + self._wrapped_signature = wrapped_signature + + def __getattr__(self, name): 
+ return getattr(self._wrapped_signature, name) diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/star_args.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/star_args.py new file mode 100644 index 000000000..71ea70938 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/star_args.py @@ -0,0 +1,220 @@ +""" +This module is responsible for inferring *args and **kwargs for signatures. + +This means for example in this case:: + + def foo(a, b, c): ... + + def bar(*args): + return foo(1, *args) + +The signature here for bar should be `bar(b, c)` instead of bar(*args). +""" +from inspect import Parameter + +from parso import tree + +from jedi.inference.utils import to_list +from jedi.inference.names import ParamNameWrapper +from jedi.inference.helpers import is_big_annoying_library + + +def _iter_nodes_for_param(param_name): + from parso.python.tree import search_ancestor + from jedi.inference.arguments import TreeArguments + + execution_context = param_name.parent_context + # Walk up the parso tree to get the FunctionNode we want. We use the parso + # tree rather than going via the execution context so that we're agnostic of + # the specific scope we're evaluating within (i.e: module or function, + # etc.). 
+ function_node = tree.search_ancestor(param_name.tree_name, 'funcdef', 'lambdef') + module_node = function_node.get_root_node() + start = function_node.children[-1].start_pos + end = function_node.children[-1].end_pos + for name in module_node.get_used_names().get(param_name.string_name): + if start <= name.start_pos < end: + # Is used in the function + argument = name.parent + if argument.type == 'argument' \ + and argument.children[0] == '*' * param_name.star_count: + trailer = search_ancestor(argument, 'trailer') + if trailer is not None: # Make sure we're in a function + context = execution_context.create_context(trailer) + if _goes_to_param_name(param_name, context, name): + values = _to_callables(context, trailer) + + args = TreeArguments.create_cached( + execution_context.inference_state, + context=context, + argument_node=trailer.children[1], + trailer=trailer, + ) + for c in values: + yield c, args + + +def _goes_to_param_name(param_name, context, potential_name): + if potential_name.type != 'name': + return False + from jedi.inference.names import TreeNameDefinition + found = TreeNameDefinition(context, potential_name).goto() + return any(param_name.parent_context == p.parent_context + and param_name.start_pos == p.start_pos + for p in found) + + +def _to_callables(context, trailer): + from jedi.inference.syntax_tree import infer_trailer + + atom_expr = trailer.parent + index = atom_expr.children[0] == 'await' + # Infer atom first + values = context.infer_node(atom_expr.children[index]) + for trailer2 in atom_expr.children[index + 1:]: + if trailer == trailer2: + break + values = infer_trailer(context, values, trailer2) + return values + + +def _remove_given_params(arguments, param_names): + count = 0 + used_keys = set() + for key, _ in arguments.unpack(): + if key is None: + count += 1 + else: + used_keys.add(key) + + for p in param_names: + if count and p.maybe_positional_argument(): + count -= 1 + continue + if p.string_name in used_keys and 
p.maybe_keyword_argument(): + continue + yield p + + +@to_list +def process_params(param_names, star_count=3): # default means both * and ** + if param_names: + if is_big_annoying_library(param_names[0].parent_context): + # At first this feature can look innocent, but it does a lot of + # type inference in some cases, so we just ditch it. + yield from param_names + return + + used_names = set() + arg_callables = [] + kwarg_callables = [] + + kw_only_names = [] + kwarg_names = [] + arg_names = [] + original_arg_name = None + original_kwarg_name = None + for p in param_names: + kind = p.get_kind() + if kind == Parameter.VAR_POSITIONAL: + if star_count & 1: + arg_callables = _iter_nodes_for_param(p) + original_arg_name = p + elif p.get_kind() == Parameter.VAR_KEYWORD: + if star_count & 2: + kwarg_callables = list(_iter_nodes_for_param(p)) + original_kwarg_name = p + elif kind == Parameter.KEYWORD_ONLY: + if star_count & 2: + kw_only_names.append(p) + elif kind == Parameter.POSITIONAL_ONLY: + if star_count & 1: + yield p + else: + if star_count == 1: + yield ParamNameFixedKind(p, Parameter.POSITIONAL_ONLY) + elif star_count == 2: + kw_only_names.append(ParamNameFixedKind(p, Parameter.KEYWORD_ONLY)) + else: + used_names.add(p.string_name) + yield p + + # First process *args + longest_param_names = () + found_arg_signature = False + found_kwarg_signature = False + for func_and_argument in arg_callables: + func, arguments = func_and_argument + new_star_count = star_count + if func_and_argument in kwarg_callables: + kwarg_callables.remove(func_and_argument) + else: + new_star_count = 1 + + for signature in func.get_signatures(): + found_arg_signature = True + if new_star_count == 3: + found_kwarg_signature = True + args_for_this_func = [] + for p in process_params( + list(_remove_given_params( + arguments, + signature.get_param_names(resolve_stars=False) + )), new_star_count): + if p.get_kind() == Parameter.VAR_KEYWORD: + kwarg_names.append(p) + elif p.get_kind() == 
Parameter.VAR_POSITIONAL: + arg_names.append(p) + elif p.get_kind() == Parameter.KEYWORD_ONLY: + kw_only_names.append(p) + else: + args_for_this_func.append(p) + if len(args_for_this_func) > len(longest_param_names): + longest_param_names = args_for_this_func + + for p in longest_param_names: + if star_count == 1 and p.get_kind() != Parameter.VAR_POSITIONAL: + yield ParamNameFixedKind(p, Parameter.POSITIONAL_ONLY) + else: + if p.get_kind() == Parameter.POSITIONAL_OR_KEYWORD: + used_names.add(p.string_name) + yield p + + if not found_arg_signature and original_arg_name is not None: + yield original_arg_name + elif arg_names: + yield arg_names[0] + + # Then process **kwargs + for func, arguments in kwarg_callables: + for signature in func.get_signatures(): + found_kwarg_signature = True + for p in process_params( + list(_remove_given_params( + arguments, + signature.get_param_names(resolve_stars=False) + )), star_count=2): + if p.get_kind() == Parameter.VAR_KEYWORD: + kwarg_names.append(p) + elif p.get_kind() == Parameter.KEYWORD_ONLY: + kw_only_names.append(p) + + for p in kw_only_names: + if p.string_name in used_names: + continue + yield p + used_names.add(p.string_name) + + if not found_kwarg_signature and original_kwarg_name is not None: + yield original_kwarg_name + elif kwarg_names: + yield kwarg_names[0] + + +class ParamNameFixedKind(ParamNameWrapper): + def __init__(self, param_name, new_kind): + super().__init__(param_name) + self._new_kind = new_kind + + def get_kind(self): + return self._new_kind diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/syntax_tree.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/syntax_tree.py new file mode 100644 index 000000000..f55ab56bb --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/syntax_tree.py @@ -0,0 +1,883 @@ +""" +Functions inferring the syntax tree. 
+""" +import copy + +from parso.python import tree + +from jedi import debug +from jedi import parser_utils +from jedi.inference.base_value import ValueSet, NO_VALUES, ContextualizedNode, \ + iterator_to_value_set, iterate_values +from jedi.inference.lazy_value import LazyTreeValue +from jedi.inference import compiled +from jedi.inference import recursion +from jedi.inference import analysis +from jedi.inference import imports +from jedi.inference import arguments +from jedi.inference.value import ClassValue, FunctionValue +from jedi.inference.value import iterable +from jedi.inference.value.dynamic_arrays import ListModification, DictModification +from jedi.inference.value import TreeInstance +from jedi.inference.helpers import is_string, is_literal, is_number, \ + get_names_of_node, is_big_annoying_library +from jedi.inference.compiled.access import COMPARISON_OPERATORS +from jedi.inference.cache import inference_state_method_cache +from jedi.inference.gradual.stub_value import VersionInfo +from jedi.inference.gradual import annotation +from jedi.inference.names import TreeNameDefinition +from jedi.inference.context import CompForContext +from jedi.inference.value.decorator import Decoratee +from jedi.plugins import plugin_manager + +operator_to_magic_method = { + '+': '__add__', + '-': '__sub__', + '*': '__mul__', + '@': '__matmul__', + '/': '__truediv__', + '//': '__floordiv__', + '%': '__mod__', + '**': '__pow__', + '<<': '__lshift__', + '>>': '__rshift__', + '&': '__and__', + '|': '__or__', + '^': '__xor__', +} + +reverse_operator_to_magic_method = { + k: '__r' + v[2:] for k, v in operator_to_magic_method.items() +} + + +def _limit_value_infers(func): + """ + This is for now the way how we limit type inference going wild. There are + other ways to ensure recursion limits as well. This is mostly necessary + because of instance (self) access that can be quite tricky to limit. 
+ + I'm still not sure this is the way to go, but it looks okay for now and we + can still go anther way in the future. Tests are there. ~ dave + """ + def wrapper(context, *args, **kwargs): + n = context.tree_node + inference_state = context.inference_state + try: + inference_state.inferred_element_counts[n] += 1 + maximum = 300 + if context.parent_context is None \ + and context.get_value() is inference_state.builtins_module: + # Builtins should have a more generous inference limit. + # It is important that builtins can be executed, otherwise some + # functions that depend on certain builtins features would be + # broken, see e.g. GH #1432 + maximum *= 100 + + if inference_state.inferred_element_counts[n] > maximum: + debug.warning('In value %s there were too many inferences.', n) + return NO_VALUES + except KeyError: + inference_state.inferred_element_counts[n] = 1 + return func(context, *args, **kwargs) + + return wrapper + + +def infer_node(context, element): + if isinstance(context, CompForContext): + return _infer_node(context, element) + + if_stmt = element + while if_stmt is not None: + if_stmt = if_stmt.parent + if if_stmt.type in ('if_stmt', 'for_stmt'): + break + if parser_utils.is_scope(if_stmt): + if_stmt = None + break + predefined_if_name_dict = context.predefined_names.get(if_stmt) + # TODO there's a lot of issues with this one. We actually should do + # this in a different way. Caching should only be active in certain + # cases and this all sucks. + if predefined_if_name_dict is None and if_stmt \ + and if_stmt.type == 'if_stmt' and context.inference_state.is_analysis: + if_stmt_test = if_stmt.children[1] + name_dicts = [{}] + # If we already did a check, we don't want to do it again -> If + # value.predefined_names is filled, we stop. + # We don't want to check the if stmt itself, it's just about + # the content. + if element.start_pos > if_stmt_test.end_pos: + # Now we need to check if the names in the if_stmt match the + # names in the suite. 
+ if_names = get_names_of_node(if_stmt_test) + element_names = get_names_of_node(element) + str_element_names = [e.value for e in element_names] + if any(i.value in str_element_names for i in if_names): + for if_name in if_names: + definitions = context.inference_state.infer(context, if_name) + # Every name that has multiple different definitions + # causes the complexity to rise. The complexity should + # never fall below 1. + if len(definitions) > 1: + if len(name_dicts) * len(definitions) > 16: + debug.dbg('Too many options for if branch inference %s.', if_stmt) + # There's only a certain amount of branches + # Jedi can infer, otherwise it will take to + # long. + name_dicts = [{}] + break + + original_name_dicts = list(name_dicts) + name_dicts = [] + for definition in definitions: + new_name_dicts = list(original_name_dicts) + for i, name_dict in enumerate(new_name_dicts): + new_name_dicts[i] = name_dict.copy() + new_name_dicts[i][if_name.value] = ValueSet([definition]) + + name_dicts += new_name_dicts + else: + for name_dict in name_dicts: + name_dict[if_name.value] = definitions + if len(name_dicts) > 1: + result = NO_VALUES + for name_dict in name_dicts: + with context.predefine_names(if_stmt, name_dict): + result |= _infer_node(context, element) + return result + else: + return _infer_node_if_inferred(context, element) + else: + if predefined_if_name_dict: + return _infer_node(context, element) + else: + return _infer_node_if_inferred(context, element) + + +def _infer_node_if_inferred(context, element): + """ + TODO This function is temporary: Merge with infer_node. 
+ """ + parent = element + while parent is not None: + parent = parent.parent + predefined_if_name_dict = context.predefined_names.get(parent) + if predefined_if_name_dict is not None: + return _infer_node(context, element) + return _infer_node_cached(context, element) + + +@inference_state_method_cache(default=NO_VALUES) +def _infer_node_cached(context, element): + return _infer_node(context, element) + + +@debug.increase_indent +@_limit_value_infers +def _infer_node(context, element): + debug.dbg('infer_node %s@%s in %s', element, element.start_pos, context) + inference_state = context.inference_state + typ = element.type + if typ in ('name', 'number', 'string', 'atom', 'strings', 'keyword', 'fstring'): + return infer_atom(context, element) + elif typ == 'lambdef': + return ValueSet([FunctionValue.from_context(context, element)]) + elif typ == 'expr_stmt': + return infer_expr_stmt(context, element) + elif typ in ('power', 'atom_expr'): + first_child = element.children[0] + children = element.children[1:] + had_await = False + if first_child.type == 'keyword' and first_child.value == 'await': + had_await = True + first_child = children.pop(0) + + value_set = context.infer_node(first_child) + for (i, trailer) in enumerate(children): + if trailer == '**': # has a power operation. + right = context.infer_node(children[i + 1]) + value_set = _infer_comparison( + context, + value_set, + trailer, + right + ) + break + value_set = infer_trailer(context, value_set, trailer) + + if had_await: + return value_set.py__await__().py__stop_iteration_returns() + return value_set + elif typ in ('testlist_star_expr', 'testlist',): + # The implicit tuple in statements. 
+ return ValueSet([iterable.SequenceLiteralValue(inference_state, context, element)]) + elif typ in ('not_test', 'factor'): + value_set = context.infer_node(element.children[-1]) + for operator in element.children[:-1]: + value_set = infer_factor(value_set, operator) + return value_set + elif typ == 'test': + # `x if foo else y` case. + return (context.infer_node(element.children[0]) + | context.infer_node(element.children[-1])) + elif typ == 'operator': + # Must be an ellipsis, other operators are not inferred. + if element.value != '...': + origin = element.parent + raise AssertionError("unhandled operator %s in %s " % (repr(element.value), origin)) + return ValueSet([compiled.builtin_from_name(inference_state, 'Ellipsis')]) + elif typ == 'dotted_name': + value_set = infer_atom(context, element.children[0]) + for next_name in element.children[2::2]: + value_set = value_set.py__getattribute__(next_name, name_context=context) + return value_set + elif typ == 'eval_input': + return context.infer_node(element.children[0]) + elif typ == 'annassign': + return annotation.infer_annotation(context, element.children[1]) \ + .execute_annotation() + elif typ == 'yield_expr': + if len(element.children) and element.children[1].type == 'yield_arg': + # Implies that it's a yield from. + element = element.children[1].children[1] + generators = context.infer_node(element) \ + .py__getattribute__('__iter__').execute_with_values() + return generators.py__stop_iteration_returns() + + # Generator.send() is not implemented. + return NO_VALUES + elif typ == 'namedexpr_test': + return context.infer_node(element.children[2]) + else: + return infer_or_test(context, element) + + +def infer_trailer(context, atom_values, trailer): + trailer_op, node = trailer.children[:2] + if node == ')': # `arglist` is optional. 
+ node = None + + if trailer_op == '[': + trailer_op, node, _ = trailer.children + return atom_values.get_item( + _infer_subscript_list(context, node), + ContextualizedNode(context, trailer) + ) + else: + debug.dbg('infer_trailer: %s in %s', trailer, atom_values) + if trailer_op == '.': + return atom_values.py__getattribute__( + name_context=context, + name_or_str=node + ) + else: + assert trailer_op == '(', 'trailer_op is actually %s' % trailer_op + args = arguments.TreeArguments(context.inference_state, context, node, trailer) + return atom_values.execute(args) + + +def infer_atom(context, atom): + """ + Basically to process ``atom`` nodes. The parser sometimes doesn't + generate the node (because it has just one child). In that case an atom + might be a name or a literal as well. + """ + state = context.inference_state + if atom.type == 'name': + # This is the first global lookup. + stmt = tree.search_ancestor(atom, 'expr_stmt', 'lambdef', 'if_stmt') or atom + if stmt.type == 'if_stmt': + if not any(n.start_pos <= atom.start_pos < n.end_pos for n in stmt.get_test_nodes()): + stmt = atom + elif stmt.type == 'lambdef': + stmt = atom + position = stmt.start_pos + if _is_annotation_name(atom): + # Since Python 3.7 (with from __future__ import annotations), + # annotations are essentially strings and can reference objects + # that are defined further down in code. Therefore just set the + # position to None, so the finder will not try to stop at a certain + # position in the module. + position = None + return context.py__getattribute__(atom, position=position) + elif atom.type == 'keyword': + # For False/True/None + if atom.value in ('False', 'True', 'None'): + return ValueSet([compiled.builtin_from_name(state, atom.value)]) + elif atom.value == 'yield': + # Contrary to yield from, yield can just appear alone to return a + # value when used with `.send()`. 
+ return NO_VALUES + assert False, 'Cannot infer the keyword %s' % atom + + elif isinstance(atom, tree.Literal): + string = state.compiled_subprocess.safe_literal_eval(atom.value) + return ValueSet([compiled.create_simple_object(state, string)]) + elif atom.type == 'strings': + # Will be multiple string. + value_set = infer_atom(context, atom.children[0]) + for string in atom.children[1:]: + right = infer_atom(context, string) + value_set = _infer_comparison(context, value_set, '+', right) + return value_set + elif atom.type == 'fstring': + return compiled.get_string_value_set(state) + else: + c = atom.children + # Parentheses without commas are not tuples. + if c[0] == '(' and not len(c) == 2 \ + and not(c[1].type == 'testlist_comp' + and len(c[1].children) > 1): + return context.infer_node(c[1]) + + try: + comp_for = c[1].children[1] + except (IndexError, AttributeError): + pass + else: + if comp_for == ':': + # Dict comprehensions have a colon at the 3rd index. + try: + comp_for = c[1].children[3] + except IndexError: + pass + + if comp_for.type in ('comp_for', 'sync_comp_for'): + return ValueSet([iterable.comprehension_from_atom( + state, context, atom + )]) + + # It's a dict/list/tuple literal. 
+ array_node = c[1] + try: + array_node_c = array_node.children + except AttributeError: + array_node_c = [] + if c[0] == '{' and (array_node == '}' or ':' in array_node_c + or '**' in array_node_c): + new_value = iterable.DictLiteralValue(state, context, atom) + else: + new_value = iterable.SequenceLiteralValue(state, context, atom) + return ValueSet([new_value]) + + +@_limit_value_infers +def infer_expr_stmt(context, stmt, seek_name=None): + with recursion.execution_allowed(context.inference_state, stmt) as allowed: + if allowed: + if seek_name is not None: + pep0484_values = \ + annotation.find_type_from_comment_hint_assign(context, stmt, seek_name) + if pep0484_values: + return pep0484_values + + return _infer_expr_stmt(context, stmt, seek_name) + return NO_VALUES + + +@debug.increase_indent +def _infer_expr_stmt(context, stmt, seek_name=None): + """ + The starting point of the completion. A statement always owns a call + list, which are the calls, that a statement does. In case multiple + names are defined in the statement, `seek_name` returns the result for + this name. + + expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) | + ('=' (yield_expr|testlist_star_expr))*) + annassign: ':' test ['=' test] + augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | + '<<=' | '>>=' | '**=' | '//=') + + :param stmt: A `tree.ExprStmt`. 
+ """ + def check_setitem(stmt): + atom_expr = stmt.children[0] + if atom_expr.type not in ('atom_expr', 'power'): + return False, None + name = atom_expr.children[0] + if name.type != 'name' or len(atom_expr.children) != 2: + return False, None + trailer = atom_expr.children[-1] + return trailer.children[0] == '[', trailer.children[1] + + debug.dbg('infer_expr_stmt %s (%s)', stmt, seek_name) + rhs = stmt.get_rhs() + + value_set = context.infer_node(rhs) + + if seek_name: + n = TreeNameDefinition(context, seek_name) + value_set = check_tuple_assignments(n, value_set) + + first_operator = next(stmt.yield_operators(), None) + is_setitem, subscriptlist = check_setitem(stmt) + is_annassign = first_operator not in ('=', None) and first_operator.type == 'operator' + if is_annassign or is_setitem: + # `=` is always the last character in aug assignments -> -1 + name = stmt.get_defined_names(include_setitem=True)[0].value + left_values = context.py__getattribute__(name, position=stmt.start_pos) + + if is_setitem: + def to_mod(v): + c = ContextualizedSubscriptListNode(context, subscriptlist) + if v.array_type == 'dict': + return DictModification(v, value_set, c) + elif v.array_type == 'list': + return ListModification(v, value_set, c) + return v + + value_set = ValueSet(to_mod(v) for v in left_values) + else: + operator = copy.copy(first_operator) + operator.value = operator.value[:-1] + for_stmt = tree.search_ancestor(stmt, 'for_stmt') + if for_stmt is not None and for_stmt.type == 'for_stmt' and value_set \ + and parser_utils.for_stmt_defines_one_name(for_stmt): + # Iterate through result and add the values, that's possible + # only in for loops without clutter, because they are + # predictable. Also only do it, if the variable is not a tuple. 
+ node = for_stmt.get_testlist() + cn = ContextualizedNode(context, node) + ordered = list(cn.infer().iterate(cn)) + + for lazy_value in ordered: + dct = {for_stmt.children[1].value: lazy_value.infer()} + with context.predefine_names(for_stmt, dct): + t = context.infer_node(rhs) + left_values = _infer_comparison(context, left_values, operator, t) + value_set = left_values + else: + value_set = _infer_comparison(context, left_values, operator, value_set) + debug.dbg('infer_expr_stmt result %s', value_set) + return value_set + + +def infer_or_test(context, or_test): + iterator = iter(or_test.children) + types = context.infer_node(next(iterator)) + for operator in iterator: + right = next(iterator) + if operator.type == 'comp_op': # not in / is not + operator = ' '.join(c.value for c in operator.children) + + # handle type inference of and/or here. + if operator in ('and', 'or'): + left_bools = set(left.py__bool__() for left in types) + if left_bools == {True}: + if operator == 'and': + types = context.infer_node(right) + elif left_bools == {False}: + if operator != 'and': + types = context.infer_node(right) + # Otherwise continue, because of uncertainty. + else: + types = _infer_comparison(context, types, operator, + context.infer_node(right)) + debug.dbg('infer_or_test types %s', types) + return types + + +@iterator_to_value_set +def infer_factor(value_set, operator): + """ + Calculates `+`, `-`, `~` and `not` prefixes. + """ + for value in value_set: + if operator == '-': + if is_number(value): + yield value.negate() + elif operator == 'not': + b = value.py__bool__() + if b is None: # Uncertainty. + return + yield compiled.create_simple_object(value.inference_state, not b) + else: + yield value + + +def _literals_to_types(inference_state, result): + # Changes literals ('a', 1, 1.0, etc) to its type instances (str(), + # int(), float(), etc). 
+ new_result = NO_VALUES + for typ in result: + if is_literal(typ): + # Literals are only valid as long as the operations are + # correct. Otherwise add a value-free instance. + cls = compiled.builtin_from_name(inference_state, typ.name.string_name) + new_result |= cls.execute_with_values() + else: + new_result |= ValueSet([typ]) + return new_result + + +def _infer_comparison(context, left_values, operator, right_values): + state = context.inference_state + if not left_values or not right_values: + # illegal slices e.g. cause left/right_result to be None + result = (left_values or NO_VALUES) | (right_values or NO_VALUES) + return _literals_to_types(state, result) + else: + # I don't think there's a reasonable chance that a string + # operation is still correct, once we pass something like six + # objects. + if len(left_values) * len(right_values) > 6: + return _literals_to_types(state, left_values | right_values) + else: + return ValueSet.from_sets( + _infer_comparison_part(state, context, left, operator, right) + for left in left_values + for right in right_values + ) + + +def _is_annotation_name(name): + ancestor = tree.search_ancestor(name, 'param', 'funcdef', 'expr_stmt') + if ancestor is None: + return False + + if ancestor.type in ('param', 'funcdef'): + ann = ancestor.annotation + if ann is not None: + return ann.start_pos <= name.start_pos < ann.end_pos + elif ancestor.type == 'expr_stmt': + c = ancestor.children + if len(c) > 1 and c[1].type == 'annassign': + return c[1].start_pos <= name.start_pos < c[1].end_pos + return False + + +def _is_list(value): + return value.array_type == 'list' + + +def _is_tuple(value): + return value.array_type == 'tuple' + + +def _bool_to_value(inference_state, bool_): + return compiled.builtin_from_name(inference_state, str(bool_)) + + +def _get_tuple_ints(value): + if not isinstance(value, iterable.SequenceLiteralValue): + return None + numbers = [] + for lazy_value in value.py__iter__(): + if not isinstance(lazy_value, 
LazyTreeValue): + return None + node = lazy_value.data + if node.type != 'number': + return None + try: + numbers.append(int(node.value)) + except ValueError: + return None + return numbers + + +def _infer_comparison_part(inference_state, context, left, operator, right): + l_is_num = is_number(left) + r_is_num = is_number(right) + if isinstance(operator, str): + str_operator = operator + else: + str_operator = str(operator.value) + + if str_operator == '*': + # for iterables, ignore * operations + if isinstance(left, iterable.Sequence) or is_string(left): + return ValueSet([left]) + elif isinstance(right, iterable.Sequence) or is_string(right): + return ValueSet([right]) + elif str_operator == '+': + if l_is_num and r_is_num or is_string(left) and is_string(right): + return left.execute_operation(right, str_operator) + elif _is_list(left) and _is_list(right) or _is_tuple(left) and _is_tuple(right): + return ValueSet([iterable.MergedArray(inference_state, (left, right))]) + elif str_operator == '-': + if l_is_num and r_is_num: + return left.execute_operation(right, str_operator) + elif str_operator == '%': + # With strings and numbers the left type typically remains. Except for + # `int() % float()`. + return ValueSet([left]) + elif str_operator in COMPARISON_OPERATORS: + if left.is_compiled() and right.is_compiled(): + # Possible, because the return is not an option. Just compare. + result = left.execute_operation(right, str_operator) + if result: + return result + else: + if str_operator in ('is', '!=', '==', 'is not'): + operation = COMPARISON_OPERATORS[str_operator] + bool_ = operation(left, right) + # Only if == returns True or != returns False, we can continue. + # There's no guarantee that they are not equal. This can help + # in some cases, but does not cover everything. 
+ if (str_operator in ('is', '==')) == bool_: + return ValueSet([_bool_to_value(inference_state, bool_)]) + + if isinstance(left, VersionInfo): + version_info = _get_tuple_ints(right) + if version_info is not None: + bool_result = compiled.access.COMPARISON_OPERATORS[operator]( + inference_state.environment.version_info, + tuple(version_info) + ) + return ValueSet([_bool_to_value(inference_state, bool_result)]) + + return ValueSet([ + _bool_to_value(inference_state, True), + _bool_to_value(inference_state, False) + ]) + elif str_operator in ('in', 'not in'): + return NO_VALUES + + def check(obj): + """Checks if a Jedi object is either a float or an int.""" + return isinstance(obj, TreeInstance) and \ + obj.name.string_name in ('int', 'float') + + # Static analysis, one is a number, the other one is not. + if str_operator in ('+', '-') and l_is_num != r_is_num \ + and not (check(left) or check(right)): + message = "TypeError: unsupported operand type(s) for +: %s and %s" + analysis.add(context, 'type-error-operation', operator, + message % (left, right)) + + if left.is_class() or right.is_class(): + return NO_VALUES + + method_name = operator_to_magic_method[str_operator] + magic_methods = left.py__getattribute__(method_name) + if magic_methods: + result = magic_methods.execute_with_values(right) + if result: + return result + + if not magic_methods: + reverse_method_name = reverse_operator_to_magic_method[str_operator] + magic_methods = right.py__getattribute__(reverse_method_name) + + result = magic_methods.execute_with_values(left) + if result: + return result + + result = ValueSet([left, right]) + debug.dbg('Used operator %s resulting in %s', operator, result) + return result + + +@plugin_manager.decorate() +def tree_name_to_values(inference_state, context, tree_name): + value_set = NO_VALUES + module_node = context.get_root_context().tree_node + # First check for annotations, like: `foo: int = 3` + if module_node is not None: + names = 
module_node.get_used_names().get(tree_name.value, []) + found_annotation = False + for name in names: + expr_stmt = name.parent + + if expr_stmt.type == "expr_stmt" and expr_stmt.children[1].type == "annassign": + correct_scope = parser_utils.get_parent_scope(name) == context.tree_node + if correct_scope: + found_annotation = True + value_set |= annotation.infer_annotation( + context, expr_stmt.children[1].children[1] + ).execute_annotation() + if found_annotation: + return value_set + + types = [] + node = tree_name.get_definition(import_name_always=True, include_setitem=True) + if node is None: + node = tree_name.parent + if node.type == 'global_stmt': + c = context.create_context(tree_name) + if c.is_module(): + # In case we are already part of the module, there is no point + # in looking up the global statement anymore, because it's not + # valid at that point anyway. + return NO_VALUES + # For global_stmt lookups, we only need the first possible scope, + # which means the function itself. 
+ filter = next(c.get_filters()) + names = filter.get(tree_name.value) + return ValueSet.from_sets(name.infer() for name in names) + elif node.type not in ('import_from', 'import_name'): + c = context.create_context(tree_name) + return infer_atom(c, tree_name) + + typ = node.type + if typ == 'for_stmt': + types = annotation.find_type_from_comment_hint_for(context, node, tree_name) + if types: + return types + if typ == 'with_stmt': + types = annotation.find_type_from_comment_hint_with(context, node, tree_name) + if types: + return types + + if typ in ('for_stmt', 'comp_for', 'sync_comp_for'): + try: + types = context.predefined_names[node][tree_name.value] + except KeyError: + cn = ContextualizedNode(context, node.children[3]) + for_types = iterate_values( + cn.infer(), + contextualized_node=cn, + is_async=node.parent.type == 'async_stmt', + ) + n = TreeNameDefinition(context, tree_name) + types = check_tuple_assignments(n, for_types) + elif typ == 'expr_stmt': + types = infer_expr_stmt(context, node, tree_name) + elif typ == 'with_stmt': + value_managers = context.infer_node(node.get_test_node_from_name(tree_name)) + if node.parent.type == 'async_stmt': + # In the case of `async with` statements, we need to + # first get the coroutine from the `__aenter__` method, + # then "unwrap" via the `__await__` method + enter_methods = value_managers.py__getattribute__('__aenter__') + coro = enter_methods.execute_with_values() + return coro.py__await__().py__stop_iteration_returns() + enter_methods = value_managers.py__getattribute__('__enter__') + return enter_methods.execute_with_values() + elif typ in ('import_from', 'import_name'): + types = imports.infer_import(context, tree_name) + elif typ in ('funcdef', 'classdef'): + types = _apply_decorators(context, node) + elif typ == 'try_stmt': + # TODO an exception can also be a tuple. Check for those. + # TODO check for types that are not classes and add it to + # the static analysis report. 
+ exceptions = context.infer_node(tree_name.get_previous_sibling().get_previous_sibling()) + types = exceptions.execute_with_values() + elif typ == 'param': + types = NO_VALUES + elif typ == 'del_stmt': + types = NO_VALUES + elif typ == 'namedexpr_test': + types = infer_node(context, node) + else: + raise ValueError("Should not happen. type: %s" % typ) + return types + + +# We don't want to have functions/classes that are created by the same +# tree_node. +@inference_state_method_cache() +def _apply_decorators(context, node): + """ + Returns the function, that should to be executed in the end. + This is also the places where the decorators are processed. + """ + if node.type == 'classdef': + decoratee_value = ClassValue( + context.inference_state, + parent_context=context, + tree_node=node + ) + else: + decoratee_value = FunctionValue.from_context(context, node) + initial = values = ValueSet([decoratee_value]) + + if is_big_annoying_library(context): + return values + + for dec in reversed(node.get_decorators()): + debug.dbg('decorator: %s %s', dec, values, color="MAGENTA") + with debug.increase_indent_cm(): + dec_values = context.infer_node(dec.children[1]) + trailer_nodes = dec.children[2:-1] + if trailer_nodes: + # Create a trailer and infer it. + trailer = tree.PythonNode('trailer', trailer_nodes) + trailer.parent = dec + dec_values = infer_trailer(context, dec_values, trailer) + + if not len(dec_values): + code = dec.get_code(include_prefix=False) + # For the short future, we don't want to hear about the runtime + # decorator in typing that was intentionally omitted. This is not + # "correct", but helps with debugging. 
+ if code != '@runtime\n': + debug.warning('decorator not found: %s on %s', dec, node) + return initial + + values = dec_values.execute(arguments.ValuesArguments([values])) + if not len(values): + debug.warning('not possible to resolve wrappers found %s', node) + return initial + + debug.dbg('decorator end %s', values, color="MAGENTA") + if values != initial: + return ValueSet([Decoratee(c, decoratee_value) for c in values]) + return values + + +def check_tuple_assignments(name, value_set): + """ + Checks if tuples are assigned. + """ + lazy_value = None + for index, node in name.assignment_indexes(): + cn = ContextualizedNode(name.parent_context, node) + iterated = value_set.iterate(cn) + if isinstance(index, slice): + # For no star unpacking is not possible. + return NO_VALUES + i = 0 + while i <= index: + try: + lazy_value = next(iterated) + except StopIteration: + # We could do this with the default param in next. But this + # would allow this loop to run for a very long time if the + # index number is high. Therefore break if the loop is + # finished. + return NO_VALUES + else: + i += lazy_value.max + value_set = lazy_value.infer() + return value_set + + +class ContextualizedSubscriptListNode(ContextualizedNode): + def infer(self): + return _infer_subscript_list(self.context, self.node) + + +def _infer_subscript_list(context, index): + """ + Handles slices in subscript nodes. + """ + if index == ':': + # Like array[:] + return ValueSet([iterable.Slice(context, None, None, None)]) + + elif index.type == 'subscript' and not index.children[0] == '.': + # subscript basically implies a slice operation + # e.g. 
array[:3] + result = [] + for el in index.children: + if el == ':': + if not result: + result.append(None) + elif el.type == 'sliceop': + if len(el.children) == 2: + result.append(el.children[1]) + else: + result.append(el) + result += [None] * (3 - len(result)) + + return ValueSet([iterable.Slice(context, *result)]) + elif index.type == 'subscriptlist': + return ValueSet([iterable.SequenceLiteralValue(context.inference_state, context, index)]) + + # No slices + return context.infer_node(index) diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/sys_path.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/sys_path.py new file mode 100644 index 000000000..062a0aa3a --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/sys_path.py @@ -0,0 +1,272 @@ +import os +import re +from pathlib import Path +from importlib.machinery import all_suffixes + +from jedi.inference.cache import inference_state_method_cache +from jedi.inference.base_value import ContextualizedNode +from jedi.inference.helpers import is_string, get_str_or_none +from jedi.parser_utils import get_cached_code_lines +from jedi.file_io import FileIO +from jedi import settings +from jedi import debug + +_BUILDOUT_PATH_INSERTION_LIMIT = 10 + + +def _abs_path(module_context, str_path: str): + path = Path(str_path) + if path.is_absolute(): + return path + + module_path = module_context.py__file__() + if module_path is None: + # In this case we have no idea where we actually are in the file + # system. + return None + + base_dir = module_path.parent + return base_dir.joinpath(path).absolute() + + +def _paths_from_assignment(module_context, expr_stmt): + """ + Extracts the assigned strings from an assignment that looks as follows:: + + sys.path[0:0] = ['module/path', 'another/module/path'] + + This function is in general pretty tolerant (and therefore 'buggy'). 
+ However, it's not a big issue usually to add more paths to Jedi's sys_path, + because it will only affect Jedi in very random situations and by adding + more paths than necessary, it usually benefits the general user. + """ + for assignee, operator in zip(expr_stmt.children[::2], expr_stmt.children[1::2]): + try: + assert operator in ['=', '+='] + assert assignee.type in ('power', 'atom_expr') and \ + len(assignee.children) > 1 + c = assignee.children + assert c[0].type == 'name' and c[0].value == 'sys' + trailer = c[1] + assert trailer.children[0] == '.' and trailer.children[1].value == 'path' + # TODO Essentially we're not checking details on sys.path + # manipulation. Both assigment of the sys.path and changing/adding + # parts of the sys.path are the same: They get added to the end of + # the current sys.path. + """ + execution = c[2] + assert execution.children[0] == '[' + subscript = execution.children[1] + assert subscript.type == 'subscript' + assert ':' in subscript.children + """ + except AssertionError: + continue + + cn = ContextualizedNode(module_context.create_context(expr_stmt), expr_stmt) + for lazy_value in cn.infer().iterate(cn): + for value in lazy_value.infer(): + if is_string(value): + abs_path = _abs_path(module_context, value.get_safe_value()) + if abs_path is not None: + yield abs_path + + +def _paths_from_list_modifications(module_context, trailer1, trailer2): + """ extract the path from either "sys.path.append" or "sys.path.insert" """ + # Guarantee that both are trailers, the first one a name and the second one + # a function execution with at least one param. + if not (trailer1.type == 'trailer' and trailer1.children[0] == '.' + and trailer2.type == 'trailer' and trailer2.children[0] == '(' + and len(trailer2.children) == 3): + return + + name = trailer1.children[1].value + if name not in ['insert', 'append']: + return + arg = trailer2.children[1] + if name == 'insert' and len(arg.children) in (3, 4): # Possible trailing comma. 
+ arg = arg.children[2] + + for value in module_context.create_context(arg).infer_node(arg): + p = get_str_or_none(value) + if p is None: + continue + abs_path = _abs_path(module_context, p) + if abs_path is not None: + yield abs_path + + +@inference_state_method_cache(default=[]) +def check_sys_path_modifications(module_context): + """ + Detect sys.path modifications within module. + """ + def get_sys_path_powers(names): + for name in names: + power = name.parent.parent + if power is not None and power.type in ('power', 'atom_expr'): + c = power.children + if c[0].type == 'name' and c[0].value == 'sys' \ + and c[1].type == 'trailer': + n = c[1].children[1] + if n.type == 'name' and n.value == 'path': + yield name, power + + if module_context.tree_node is None: + return [] + + added = [] + try: + possible_names = module_context.tree_node.get_used_names()['path'] + except KeyError: + pass + else: + for name, power in get_sys_path_powers(possible_names): + expr_stmt = power.parent + if len(power.children) >= 4: + added.extend( + _paths_from_list_modifications( + module_context, *power.children[2:4] + ) + ) + elif expr_stmt is not None and expr_stmt.type == 'expr_stmt': + added.extend(_paths_from_assignment(module_context, expr_stmt)) + return added + + +def discover_buildout_paths(inference_state, script_path): + buildout_script_paths = set() + + for buildout_script_path in _get_buildout_script_paths(script_path): + for path in _get_paths_from_buildout_script(inference_state, buildout_script_path): + buildout_script_paths.add(path) + if len(buildout_script_paths) >= _BUILDOUT_PATH_INSERTION_LIMIT: + break + + return buildout_script_paths + + +def _get_paths_from_buildout_script(inference_state, buildout_script_path): + file_io = FileIO(str(buildout_script_path)) + try: + module_node = inference_state.parse( + file_io=file_io, + cache=True, + cache_path=settings.cache_directory + ) + except IOError: + debug.warning('Error trying to read buildout_script: %s', 
buildout_script_path) + return + + from jedi.inference.value import ModuleValue + module_context = ModuleValue( + inference_state, module_node, + file_io=file_io, + string_names=None, + code_lines=get_cached_code_lines(inference_state.grammar, buildout_script_path), + ).as_context() + yield from check_sys_path_modifications(module_context) + + +def _get_parent_dir_with_file(path: Path, filename): + for parent in path.parents: + try: + if parent.joinpath(filename).is_file(): + return parent + except OSError: + continue + return None + + +def _get_buildout_script_paths(search_path: Path): + """ + if there is a 'buildout.cfg' file in one of the parent directories of the + given module it will return a list of all files in the buildout bin + directory that look like python files. + + :param search_path: absolute path to the module. + """ + project_root = _get_parent_dir_with_file(search_path, 'buildout.cfg') + if not project_root: + return + bin_path = project_root.joinpath('bin') + if not bin_path.exists(): + return + + for filename in os.listdir(bin_path): + try: + filepath = bin_path.joinpath(filename) + with open(filepath, 'r') as f: + firstline = f.readline() + if firstline.startswith('#!') and 'python' in firstline: + yield filepath + except (UnicodeDecodeError, IOError) as e: + # Probably a binary file; permission error or race cond. because + # file got deleted. Ignore it. + debug.warning(str(e)) + continue + + +def remove_python_path_suffix(path): + for suffix in all_suffixes() + ['.pyi']: + if path.suffix == suffix: + path = path.with_name(path.stem) + break + return path + + +def transform_path_to_dotted(sys_path, module_path): + """ + Returns the dotted path inside a sys.path as a list of names. e.g. + + >>> transform_path_to_dotted([str(Path("/foo").absolute())], Path('/foo/bar/baz.py').absolute()) + (('bar', 'baz'), False) + + Returns (None, False) if the path doesn't really resolve to anything. + The second return part is if it is a package. 
+ """ + # First remove the suffix. + module_path = remove_python_path_suffix(module_path) + if module_path.name.startswith('.'): + return None, False + + # Once the suffix was removed we are using the files as we know them. This + # means that if someone uses an ending like .vim for a Python file, .vim + # will be part of the returned dotted part. + + is_package = module_path.name == '__init__' + if is_package: + module_path = module_path.parent + + def iter_potential_solutions(): + for p in sys_path: + if str(module_path).startswith(p): + # Strip the trailing slash/backslash + rest = str(module_path)[len(p):] + # On Windows a path can also use a slash. + if rest.startswith(os.path.sep) or rest.startswith('/'): + # Remove a slash in cases it's still there. + rest = rest[1:] + + if rest: + split = rest.split(os.path.sep) + if not all(split): + # This means that part of the file path was empty, this + # is very strange and is probably a file that is called + # `.py`. + return + # Stub folders for foo can end with foo-stubs. Just remove + # it. + yield tuple(re.sub(r'-stubs$', '', s) for s in split) + + potential_solutions = tuple(iter_potential_solutions()) + if not potential_solutions: + return None, False + # Try to find the shortest path, this makes more sense usually, because the + # user usually has venvs somewhere. This means that a path like + # .tox/py37/lib/python3.7/os.py can be normal for a file. However in that + # case we definitely want to return ['os'] as a path and not a crazy + # ['.tox', 'py37', 'lib', 'python3.7', 'os']. Keep in mind that this is a + # heuristic and there's now ay to "always" do it right. 
+ return sorted(potential_solutions, key=lambda p: len(p))[0], is_package diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/utils.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/utils.py new file mode 100644 index 000000000..ab10bcd94 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/utils.py @@ -0,0 +1,90 @@ +""" A universal module with functions / classes without dependencies. """ +import functools +import re +import os + + +_sep = os.path.sep +if os.path.altsep is not None: + _sep += os.path.altsep +_path_re = re.compile(r'(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep))) +del _sep + + +def to_list(func): + def wrapper(*args, **kwargs): + return list(func(*args, **kwargs)) + return wrapper + + +def to_tuple(func): + def wrapper(*args, **kwargs): + return tuple(func(*args, **kwargs)) + return wrapper + + +def unite(iterable): + """Turns a two dimensional array into a one dimensional.""" + return set(typ for types in iterable for typ in types) + + +class UncaughtAttributeError(Exception): + """ + Important, because `__getattr__` and `hasattr` catch AttributeErrors + implicitly. This is really evil (mainly because of `__getattr__`). + Therefore this class originally had to be derived from `BaseException` + instead of `Exception`. But because I removed relevant `hasattr` from + the code base, we can now switch back to `Exception`. + + :param base: return values of sys.exc_info(). + """ + + +def safe_property(func): + return property(reraise_uncaught(func)) + + +def reraise_uncaught(func): + """ + Re-throw uncaught `AttributeError`. + + Usage: Put ``@rethrow_uncaught`` in front of the function + which does **not** suppose to raise `AttributeError`. + + AttributeError is easily get caught by `hasattr` and another + ``except AttributeError`` clause. 
This becomes problem when you use + a lot of "dynamic" attributes (e.g., using ``@property``) because you + can't distinguish if the property does not exist for real or some code + inside of the "dynamic" attribute through that error. In a well + written code, such error should not exist but getting there is very + difficult. This decorator is to help us getting there by changing + `AttributeError` to `UncaughtAttributeError` to avoid unexpected catch. + This helps us noticing bugs earlier and facilitates debugging. + """ + @functools.wraps(func) + def wrapper(*args, **kwds): + try: + return func(*args, **kwds) + except AttributeError as e: + raise UncaughtAttributeError(e) from e + return wrapper + + +class PushBackIterator: + def __init__(self, iterator): + self.pushes = [] + self.iterator = iterator + self.current = None + + def push_back(self, value): + self.pushes.append(value) + + def __iter__(self): + return self + + def __next__(self): + if self.pushes: + self.current = self.pushes.pop() + else: + self.current = next(self.iterator) + return self.current diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/value/__init__.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/value/__init__.py new file mode 100644 index 000000000..621642155 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/value/__init__.py @@ -0,0 +1,9 @@ +# Re-export symbols for wider use. We configure mypy and flake8 to be aware that +# this file does this. 
+ +from jedi.inference.value.module import ModuleValue +from jedi.inference.value.klass import ClassValue +from jedi.inference.value.function import FunctionValue, \ + MethodValue +from jedi.inference.value.instance import AnonymousInstance, BoundMethod, \ + CompiledInstance, AbstractInstanceValue, TreeInstance diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/value/decorator.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/value/decorator.py new file mode 100644 index 000000000..69c4cb6ae --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/value/decorator.py @@ -0,0 +1,34 @@ +''' +Decorators are not really values, however we need some wrappers to improve +docstrings and other things around decorators. +''' + +from jedi.inference.base_value import ValueWrapper, ValueSet + + +class Decoratee(ValueWrapper): + def __init__(self, wrapped_value, original_value): + super().__init__(wrapped_value) + self._original_value = original_value + + def py__doc__(self): + return self._original_value.py__doc__() + + def py__get__(self, instance, class_value): + return ValueSet( + Decoratee(v, self._original_value) + for v in self._wrapped_value.py__get__(instance, class_value) + ) + + def get_signatures(self): + signatures = self._wrapped_value.get_signatures() + if signatures: + return signatures + # Fallback to signatures of the original function/class if the + # decorator has no signature or it is not inferrable. + # + # __get__ means that it's a descriptor. In that case we don't return + # signatures, because they are usually properties. 
+ if not self._wrapped_value.py__getattribute__('__get__'): + return self._original_value.get_signatures() + return [] diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/value/dynamic_arrays.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/value/dynamic_arrays.py new file mode 100644 index 000000000..4353a4887 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/value/dynamic_arrays.py @@ -0,0 +1,200 @@ +""" +A module to deal with stuff like `list.append` and `set.add`. + +Array modifications +******************* + +If the content of an array (``set``/``list``) is requested somewhere, the +current module will be checked for appearances of ``arr.append``, +``arr.insert``, etc. If the ``arr`` name points to an actual array, the +content will be added + +This can be really cpu intensive, as you can imagine. Because |jedi| has to +follow **every** ``append`` and check whether it's the right array. However this +works pretty good, because in *slow* cases, the recursion detector and other +settings will stop this process. + +It is important to note that: + +1. Array modfications work only in the current module. +2. Jedi only checks Array additions; ``list.pop``, etc are ignored. 
+""" +from jedi import debug +from jedi import settings +from jedi.inference import recursion +from jedi.inference.base_value import ValueSet, NO_VALUES, HelperValueMixin, \ + ValueWrapper +from jedi.inference.lazy_value import LazyKnownValues +from jedi.inference.helpers import infer_call_of_leaf +from jedi.inference.cache import inference_state_method_cache + +_sentinel = object() + + +def check_array_additions(context, sequence): + """ Just a mapper function for the internal _internal_check_array_additions """ + if sequence.array_type not in ('list', 'set'): + # TODO also check for dict updates + return NO_VALUES + + return _internal_check_array_additions(context, sequence) + + +@inference_state_method_cache(default=NO_VALUES) +@debug.increase_indent +def _internal_check_array_additions(context, sequence): + """ + Checks if a `Array` has "add" (append, insert, extend) statements: + + >>> a = [""] + >>> a.append(1) + """ + from jedi.inference import arguments + + debug.dbg('Dynamic array search for %s' % sequence, color='MAGENTA') + module_context = context.get_root_context() + if not settings.dynamic_array_additions or module_context.is_compiled(): + debug.dbg('Dynamic array search aborted.', color='MAGENTA') + return NO_VALUES + + def find_additions(context, arglist, add_name): + params = list(arguments.TreeArguments(context.inference_state, context, arglist).unpack()) + result = set() + if add_name in ['insert']: + params = params[1:] + if add_name in ['append', 'add', 'insert']: + for key, lazy_value in params: + result.add(lazy_value) + elif add_name in ['extend', 'update']: + for key, lazy_value in params: + result |= set(lazy_value.infer().iterate()) + return result + + temp_param_add, settings.dynamic_params_for_other_modules = \ + settings.dynamic_params_for_other_modules, False + + is_list = sequence.name.string_name == 'list' + search_names = (['append', 'extend', 'insert'] if is_list else ['add', 'update']) + + added_types = set() + for add_name in 
search_names: + try: + possible_names = module_context.tree_node.get_used_names()[add_name] + except KeyError: + continue + else: + for name in possible_names: + value_node = context.tree_node + if not (value_node.start_pos < name.start_pos < value_node.end_pos): + continue + trailer = name.parent + power = trailer.parent + trailer_pos = power.children.index(trailer) + try: + execution_trailer = power.children[trailer_pos + 1] + except IndexError: + continue + else: + if execution_trailer.type != 'trailer' \ + or execution_trailer.children[0] != '(' \ + or execution_trailer.children[1] == ')': + continue + + random_context = context.create_context(name) + + with recursion.execution_allowed(context.inference_state, power) as allowed: + if allowed: + found = infer_call_of_leaf( + random_context, + name, + cut_own_trailer=True + ) + if sequence in found: + # The arrays match. Now add the results + added_types |= find_additions( + random_context, + execution_trailer.children[1], + add_name + ) + + # reset settings + settings.dynamic_params_for_other_modules = temp_param_add + debug.dbg('Dynamic array result %s', added_types, color='MAGENTA') + return added_types + + +def get_dynamic_array_instance(instance, arguments): + """Used for set() and list() instances.""" + ai = _DynamicArrayAdditions(instance, arguments) + from jedi.inference import arguments + return arguments.ValuesArguments([ValueSet([ai])]) + + +class _DynamicArrayAdditions(HelperValueMixin): + """ + Used for the usage of set() and list(). + This is definitely a hack, but a good one :-) + It makes it possible to use set/list conversions. + + This is not a proper context, because it doesn't have to be. It's not used + in the wild, it's just used within typeshed as an argument to `__init__` + for set/list and never used in any other place. 
+ """ + def __init__(self, instance, arguments): + self._instance = instance + self._arguments = arguments + + def py__class__(self): + tuple_, = self._instance.inference_state.builtins_module.py__getattribute__('tuple') + return tuple_ + + def py__iter__(self, contextualized_node=None): + arguments = self._arguments + try: + _, lazy_value = next(arguments.unpack()) + except StopIteration: + pass + else: + yield from lazy_value.infer().iterate() + + from jedi.inference.arguments import TreeArguments + if isinstance(arguments, TreeArguments): + additions = _internal_check_array_additions(arguments.context, self._instance) + yield from additions + + def iterate(self, contextualized_node=None, is_async=False): + return self.py__iter__(contextualized_node) + + +class _Modification(ValueWrapper): + def __init__(self, wrapped_value, assigned_values, contextualized_key): + super().__init__(wrapped_value) + self._assigned_values = assigned_values + self._contextualized_key = contextualized_key + + def py__getitem__(self, *args, **kwargs): + return self._wrapped_value.py__getitem__(*args, **kwargs) | self._assigned_values + + def py__simple_getitem__(self, index): + actual = [ + v.get_safe_value(_sentinel) + for v in self._contextualized_key.infer() + ] + if index in actual: + return self._assigned_values + return self._wrapped_value.py__simple_getitem__(index) + + +class DictModification(_Modification): + def py__iter__(self, contextualized_node=None): + yield from self._wrapped_value.py__iter__(contextualized_node) + yield self._contextualized_key + + def get_key_values(self): + return self._wrapped_value.get_key_values() | self._contextualized_key.infer() + + +class ListModification(_Modification): + def py__iter__(self, contextualized_node=None): + yield from self._wrapped_value.py__iter__(contextualized_node) + yield LazyKnownValues(self._assigned_values) diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/value/function.py 
b/bundle/jedi-vim/pythonx/jedi/jedi/inference/value/function.py new file mode 100644 index 000000000..a89e9c886 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/value/function.py @@ -0,0 +1,461 @@ +from parso.python import tree + +from jedi import debug +from jedi.inference.cache import inference_state_method_cache, CachedMetaClass +from jedi.inference import compiled +from jedi.inference import recursion +from jedi.inference import docstrings +from jedi.inference import flow_analysis +from jedi.inference.signature import TreeSignature +from jedi.inference.filters import ParserTreeFilter, FunctionExecutionFilter, \ + AnonymousFunctionExecutionFilter +from jedi.inference.names import ValueName, AbstractNameDefinition, \ + AnonymousParamName, ParamName, NameWrapper +from jedi.inference.base_value import ContextualizedNode, NO_VALUES, \ + ValueSet, TreeValue, ValueWrapper +from jedi.inference.lazy_value import LazyKnownValues, LazyKnownValue, \ + LazyTreeValue +from jedi.inference.context import ValueContext, TreeContextMixin +from jedi.inference.value import iterable +from jedi import parser_utils +from jedi.inference.parser_cache import get_yield_exprs +from jedi.inference.helpers import values_from_qualified_names +from jedi.inference.gradual.generics import TupleGenericManager + + +class LambdaName(AbstractNameDefinition): + string_name = '' + api_type = 'function' + + def __init__(self, lambda_value): + self._lambda_value = lambda_value + self.parent_context = lambda_value.parent_context + + @property + def start_pos(self): + return self._lambda_value.tree_node.start_pos + + def infer(self): + return ValueSet([self._lambda_value]) + + +class FunctionAndClassBase(TreeValue): + def get_qualified_names(self): + if self.parent_context.is_class(): + n = self.parent_context.get_qualified_names() + if n is None: + # This means that the parent class lives within a function. 
+ return None + return n + (self.py__name__(),) + elif self.parent_context.is_module(): + return (self.py__name__(),) + else: + return None + + +class FunctionMixin: + api_type = 'function' + + def get_filters(self, origin_scope=None): + cls = self.py__class__() + for instance in cls.execute_with_values(): + yield from instance.get_filters(origin_scope=origin_scope) + + def py__get__(self, instance, class_value): + from jedi.inference.value.instance import BoundMethod + if instance is None: + # Calling the Foo.bar results in the original bar function. + return ValueSet([self]) + return ValueSet([BoundMethod(instance, class_value.as_context(), self)]) + + def get_param_names(self): + return [AnonymousParamName(self, param.name) + for param in self.tree_node.get_params()] + + @property + def name(self): + if self.tree_node.type == 'lambdef': + return LambdaName(self) + return ValueName(self, self.tree_node.name) + + def is_function(self): + return True + + def py__name__(self): + return self.name.string_name + + def get_type_hint(self, add_class_info=True): + return_annotation = self.tree_node.annotation + if return_annotation is None: + def param_name_to_str(n): + s = n.string_name + annotation = n.infer().get_type_hint() + if annotation is not None: + s += ': ' + annotation + if n.default_node is not None: + s += '=' + n.default_node.get_code(include_prefix=False) + return s + + function_execution = self.as_context() + result = function_execution.infer() + return_hint = result.get_type_hint() + body = self.py__name__() + '(%s)' % ', '.join([ + param_name_to_str(n) + for n in function_execution.get_param_names() + ]) + if return_hint is None: + return body + else: + return_hint = return_annotation.get_code(include_prefix=False) + body = self.py__name__() + self.tree_node.children[2].get_code(include_prefix=False) + + return body + ' -> ' + return_hint + + def py__call__(self, arguments): + function_execution = self.as_context(arguments) + return 
function_execution.infer() + + def _as_context(self, arguments=None): + if arguments is None: + return AnonymousFunctionExecution(self) + return FunctionExecutionContext(self, arguments) + + def get_signatures(self): + return [TreeSignature(f) for f in self.get_signature_functions()] + + +class FunctionValue(FunctionMixin, FunctionAndClassBase, metaclass=CachedMetaClass): + @classmethod + def from_context(cls, context, tree_node): + def create(tree_node): + if context.is_class(): + return MethodValue( + context.inference_state, + context, + parent_context=parent_context, + tree_node=tree_node + ) + else: + return cls( + context.inference_state, + parent_context=parent_context, + tree_node=tree_node + ) + + overloaded_funcs = list(_find_overload_functions(context, tree_node)) + + parent_context = context + while parent_context.is_class() or parent_context.is_instance(): + parent_context = parent_context.parent_context + + function = create(tree_node) + + if overloaded_funcs: + return OverloadedFunctionValue( + function, + # Get them into the correct order: lower line first. + list(reversed([create(f) for f in overloaded_funcs])) + ) + return function + + def py__class__(self): + c, = values_from_qualified_names(self.inference_state, 'types', 'FunctionType') + return c + + def get_default_param_context(self): + return self.parent_context + + def get_signature_functions(self): + return [self] + + +class FunctionNameInClass(NameWrapper): + def __init__(self, class_context, name): + super().__init__(name) + self._class_context = class_context + + def get_defining_qualified_value(self): + return self._class_context.get_value() # Might be None. 
+ + +class MethodValue(FunctionValue): + def __init__(self, inference_state, class_context, *args, **kwargs): + super().__init__(inference_state, *args, **kwargs) + self.class_context = class_context + + def get_default_param_context(self): + return self.class_context + + def get_qualified_names(self): + # Need to implement this, because the parent value of a method + # value is not the class value but the module. + names = self.class_context.get_qualified_names() + if names is None: + return None + return names + (self.py__name__(),) + + @property + def name(self): + return FunctionNameInClass(self.class_context, super().name) + + +class BaseFunctionExecutionContext(ValueContext, TreeContextMixin): + def infer_annotations(self): + raise NotImplementedError + + @inference_state_method_cache(default=NO_VALUES) + @recursion.execution_recursion_decorator() + def get_return_values(self, check_yields=False): + funcdef = self.tree_node + if funcdef.type == 'lambdef': + return self.infer_node(funcdef.children[-1]) + + if check_yields: + value_set = NO_VALUES + returns = get_yield_exprs(self.inference_state, funcdef) + else: + value_set = self.infer_annotations() + if value_set: + # If there are annotations, prefer them over anything else. + # This will make it faster. 
+ return value_set + value_set |= docstrings.infer_return_types(self._value) + returns = funcdef.iter_return_stmts() + + for r in returns: + if check_yields: + value_set |= ValueSet.from_sets( + lazy_value.infer() + for lazy_value in self._get_yield_lazy_value(r) + ) + else: + check = flow_analysis.reachability_check(self, funcdef, r) + if check is flow_analysis.UNREACHABLE: + debug.dbg('Return unreachable: %s', r) + else: + try: + children = r.children + except AttributeError: + ctx = compiled.builtin_from_name(self.inference_state, 'None') + value_set |= ValueSet([ctx]) + else: + value_set |= self.infer_node(children[1]) + if check is flow_analysis.REACHABLE: + debug.dbg('Return reachable: %s', r) + break + return value_set + + def _get_yield_lazy_value(self, yield_expr): + if yield_expr.type == 'keyword': + # `yield` just yields None. + ctx = compiled.builtin_from_name(self.inference_state, 'None') + yield LazyKnownValue(ctx) + return + + node = yield_expr.children[1] + if node.type == 'yield_arg': # It must be a yield from. + cn = ContextualizedNode(self, node.children[1]) + yield from cn.infer().iterate(cn) + else: + yield LazyTreeValue(self, node) + + @recursion.execution_recursion_decorator(default=iter([])) + def get_yield_lazy_values(self, is_async=False): + # TODO: if is_async, wrap yield statements in Awaitable/async_generator_asend + for_parents = [(y, tree.search_ancestor(y, 'for_stmt', 'funcdef', + 'while_stmt', 'if_stmt')) + for y in get_yield_exprs(self.inference_state, self.tree_node)] + + # Calculate if the yields are placed within the same for loop. + yields_order = [] + last_for_stmt = None + for yield_, for_stmt in for_parents: + # For really simple for loops we can predict the order. Otherwise + # we just ignore it. + parent = for_stmt.parent + if parent.type == 'suite': + parent = parent.parent + if for_stmt.type == 'for_stmt' and parent == self.tree_node \ + and parser_utils.for_stmt_defines_one_name(for_stmt): # Simplicity for now. 
+ if for_stmt == last_for_stmt: + yields_order[-1][1].append(yield_) + else: + yields_order.append((for_stmt, [yield_])) + elif for_stmt == self.tree_node: + yields_order.append((None, [yield_])) + else: + types = self.get_return_values(check_yields=True) + if types: + yield LazyKnownValues(types, min=0, max=float('inf')) + return + last_for_stmt = for_stmt + + for for_stmt, yields in yields_order: + if for_stmt is None: + # No for_stmt, just normal yields. + for yield_ in yields: + yield from self._get_yield_lazy_value(yield_) + else: + input_node = for_stmt.get_testlist() + cn = ContextualizedNode(self, input_node) + ordered = cn.infer().iterate(cn) + ordered = list(ordered) + for lazy_value in ordered: + dct = {str(for_stmt.children[1].value): lazy_value.infer()} + with self.predefine_names(for_stmt, dct): + for yield_in_same_for_stmt in yields: + yield from self._get_yield_lazy_value(yield_in_same_for_stmt) + + def merge_yield_values(self, is_async=False): + return ValueSet.from_sets( + lazy_value.infer() + for lazy_value in self.get_yield_lazy_values() + ) + + def is_generator(self): + return bool(get_yield_exprs(self.inference_state, self.tree_node)) + + def infer(self): + """ + Created to be used by inheritance. + """ + inference_state = self.inference_state + is_coroutine = self.tree_node.parent.type in ('async_stmt', 'async_funcdef') + from jedi.inference.gradual.base import GenericClass + + if is_coroutine: + if self.is_generator(): + async_generator_classes = inference_state.typing_module \ + .py__getattribute__('AsyncGenerator') + + yield_values = self.merge_yield_values(is_async=True) + # The contravariant doesn't seem to be defined. 
+ generics = (yield_values.py__class__(), NO_VALUES) + return ValueSet( + GenericClass(c, TupleGenericManager(generics)) + for c in async_generator_classes + ).execute_annotation() + else: + async_classes = inference_state.typing_module.py__getattribute__('Coroutine') + return_values = self.get_return_values() + # Only the first generic is relevant. + generics = (return_values.py__class__(), NO_VALUES, NO_VALUES) + return ValueSet( + GenericClass(c, TupleGenericManager(generics)) for c in async_classes + ).execute_annotation() + else: + # If there are annotations, prefer them over anything else. + if self.is_generator() and not self.infer_annotations(): + return ValueSet([iterable.Generator(inference_state, self)]) + else: + return self.get_return_values() + + +class FunctionExecutionContext(BaseFunctionExecutionContext): + def __init__(self, function_value, arguments): + super().__init__(function_value) + self._arguments = arguments + + def get_filters(self, until_position=None, origin_scope=None): + yield FunctionExecutionFilter( + self, self._value, + until_position=until_position, + origin_scope=origin_scope, + arguments=self._arguments + ) + + def infer_annotations(self): + from jedi.inference.gradual.annotation import infer_return_types + return infer_return_types(self._value, self._arguments) + + def get_param_names(self): + return [ + ParamName(self._value, param.name, self._arguments) + for param in self._value.tree_node.get_params() + ] + + +class AnonymousFunctionExecution(BaseFunctionExecutionContext): + def infer_annotations(self): + # I don't think inferring anonymous executions is a big thing. + # Anonymous contexts are mostly there for the user to work in. 
~ dave + return NO_VALUES + + def get_filters(self, until_position=None, origin_scope=None): + yield AnonymousFunctionExecutionFilter( + self, self._value, + until_position=until_position, + origin_scope=origin_scope, + ) + + def get_param_names(self): + return self._value.get_param_names() + + +class OverloadedFunctionValue(FunctionMixin, ValueWrapper): + def __init__(self, function, overloaded_functions): + super().__init__(function) + self._overloaded_functions = overloaded_functions + + def py__call__(self, arguments): + debug.dbg("Execute overloaded function %s", self._wrapped_value, color='BLUE') + function_executions = [] + for signature in self.get_signatures(): + function_execution = signature.value.as_context(arguments) + function_executions.append(function_execution) + if signature.matches_signature(arguments): + return function_execution.infer() + + if self.inference_state.is_analysis: + # In this case we want precision. + return NO_VALUES + return ValueSet.from_sets(fe.infer() for fe in function_executions) + + def get_signature_functions(self): + return self._overloaded_functions + + def get_type_hint(self, add_class_info=True): + return 'Union[%s]' % ', '.join(f.get_type_hint() for f in self._overloaded_functions) + + +def _find_overload_functions(context, tree_node): + def _is_overload_decorated(funcdef): + if funcdef.parent.type == 'decorated': + decorators = funcdef.parent.children[0] + if decorators.type == 'decorator': + decorators = [decorators] + else: + decorators = decorators.children + for decorator in decorators: + dotted_name = decorator.children[1] + if dotted_name.type == 'name' and dotted_name.value == 'overload': + # TODO check with values if it's the right overload + return True + return False + + if tree_node.type == 'lambdef': + return + + if _is_overload_decorated(tree_node): + yield tree_node + + while True: + filter = ParserTreeFilter( + context, + until_position=tree_node.start_pos + ) + names = filter.get(tree_node.name.value) 
+ assert isinstance(names, list) + if not names: + break + + found = False + for name in names: + funcdef = name.tree_name.parent + if funcdef.type == 'funcdef' and _is_overload_decorated(funcdef): + tree_node = funcdef + found = True + yield funcdef + + if not found: + break diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/value/instance.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/value/instance.py new file mode 100644 index 000000000..63f220e0a --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/value/instance.py @@ -0,0 +1,610 @@ +from abc import abstractproperty + +from parso.tree import search_ancestor + +from jedi import debug +from jedi import settings +from jedi.inference import compiled +from jedi.inference.compiled.value import CompiledValueFilter +from jedi.inference.helpers import values_from_qualified_names, is_big_annoying_library +from jedi.inference.filters import AbstractFilter, AnonymousFunctionExecutionFilter +from jedi.inference.names import ValueName, TreeNameDefinition, ParamName, \ + NameWrapper +from jedi.inference.base_value import Value, NO_VALUES, ValueSet, \ + iterator_to_value_set, ValueWrapper +from jedi.inference.lazy_value import LazyKnownValue, LazyKnownValues +from jedi.inference.cache import inference_state_method_cache +from jedi.inference.arguments import ValuesArguments, TreeArgumentsWrapper +from jedi.inference.value.function import \ + FunctionValue, FunctionMixin, OverloadedFunctionValue, \ + BaseFunctionExecutionContext, FunctionExecutionContext, FunctionNameInClass +from jedi.inference.value.klass import ClassFilter +from jedi.inference.value.dynamic_arrays import get_dynamic_array_instance +from jedi.parser_utils import function_is_staticmethod, function_is_classmethod + + +class InstanceExecutedParamName(ParamName): + def __init__(self, instance, function_value, tree_name): + super().__init__( + function_value, tree_name, arguments=None) + self._instance = instance + + def infer(self): + return 
ValueSet([self._instance]) + + def matches_signature(self): + return True + + +class AnonymousMethodExecutionFilter(AnonymousFunctionExecutionFilter): + def __init__(self, instance, *args, **kwargs): + super().__init__(*args, **kwargs) + self._instance = instance + + def _convert_param(self, param, name): + if param.position_index == 0: + if function_is_classmethod(self._function_value.tree_node): + return InstanceExecutedParamName( + self._instance.py__class__(), + self._function_value, + name + ) + elif not function_is_staticmethod(self._function_value.tree_node): + return InstanceExecutedParamName( + self._instance, + self._function_value, + name + ) + return super()._convert_param(param, name) + + +class AnonymousMethodExecutionContext(BaseFunctionExecutionContext): + def __init__(self, instance, value): + super().__init__(value) + self.instance = instance + + def get_filters(self, until_position=None, origin_scope=None): + yield AnonymousMethodExecutionFilter( + self.instance, self, self._value, + until_position=until_position, + origin_scope=origin_scope, + ) + + def get_param_names(self): + param_names = list(self._value.get_param_names()) + # set the self name + param_names[0] = InstanceExecutedParamName( + self.instance, + self._value, + param_names[0].tree_name + ) + return param_names + + +class MethodExecutionContext(FunctionExecutionContext): + def __init__(self, instance, *args, **kwargs): + super().__init__(*args, **kwargs) + self.instance = instance + + +class AbstractInstanceValue(Value): + api_type = 'instance' + + def __init__(self, inference_state, parent_context, class_value): + super().__init__(inference_state, parent_context) + # Generated instances are classes that are just generated by self + # (No arguments) used. 
+ self.class_value = class_value + + def is_instance(self): + return True + + def get_qualified_names(self): + return self.class_value.get_qualified_names() + + def get_annotated_class_object(self): + return self.class_value # This is the default. + + def py__class__(self): + return self.class_value + + def py__bool__(self): + # Signalize that we don't know about the bool type. + return None + + @abstractproperty + def name(self): + raise NotImplementedError + + def get_signatures(self): + call_funcs = self.py__getattribute__('__call__').py__get__(self, self.class_value) + return [s.bind(self) for s in call_funcs.get_signatures()] + + def get_function_slot_names(self, name): + # Python classes don't look at the dictionary of the instance when + # looking up `__call__`. This is something that has to do with Python's + # internal slot system (note: not __slots__, but C slots). + for filter in self.get_filters(include_self_names=False): + names = filter.get(name) + if names: + return names + return [] + + def execute_function_slots(self, names, *inferred_args): + return ValueSet.from_sets( + name.infer().execute_with_values(*inferred_args) + for name in names + ) + + def get_type_hint(self, add_class_info=True): + return self.py__name__() + + def py__getitem__(self, index_value_set, contextualized_node): + names = self.get_function_slot_names('__getitem__') + if not names: + return super().py__getitem__( + index_value_set, + contextualized_node, + ) + + args = ValuesArguments([index_value_set]) + return ValueSet.from_sets(name.infer().execute(args) for name in names) + + def py__iter__(self, contextualized_node=None): + iter_slot_names = self.get_function_slot_names('__iter__') + if not iter_slot_names: + return super().py__iter__(contextualized_node) + + def iterate(): + for generator in self.execute_function_slots(iter_slot_names): + yield from generator.py__next__(contextualized_node) + return iterate() + + def __repr__(self): + return "<%s of %s>" % 
(self.__class__.__name__, self.class_value) + + +class CompiledInstance(AbstractInstanceValue): + # This is not really a compiled class, it's just an instance from a + # compiled class. + def __init__(self, inference_state, parent_context, class_value, arguments): + super().__init__(inference_state, parent_context, class_value) + self._arguments = arguments + + def get_filters(self, origin_scope=None, include_self_names=True): + class_value = self.get_annotated_class_object() + class_filters = class_value.get_filters( + origin_scope=origin_scope, + is_instance=True, + ) + for f in class_filters: + yield CompiledInstanceClassFilter(self, f) + + @property + def name(self): + return compiled.CompiledValueName(self, self.class_value.name.string_name) + + def is_stub(self): + return False + + +class _BaseTreeInstance(AbstractInstanceValue): + @property + def array_type(self): + name = self.class_value.py__name__() + if name in ['list', 'set', 'dict'] \ + and self.parent_context.get_root_context().is_builtins_module(): + return name + return None + + @property + def name(self): + return ValueName(self, self.class_value.name.tree_name) + + def get_filters(self, origin_scope=None, include_self_names=True): + class_value = self.get_annotated_class_object() + if include_self_names: + for cls in class_value.py__mro__(): + if not cls.is_compiled(): + # In this case we're excluding compiled objects that are + # not fake objects. It doesn't make sense for normal + # compiled objects to search for self variables. + yield SelfAttributeFilter(self, class_value, cls.as_context(), origin_scope) + + class_filters = class_value.get_filters( + origin_scope=origin_scope, + is_instance=True, + ) + for f in class_filters: + if isinstance(f, ClassFilter): + yield InstanceClassFilter(self, f) + elif isinstance(f, CompiledValueFilter): + yield CompiledInstanceClassFilter(self, f) + else: + # Propably from the metaclass. 
+ yield f + + @inference_state_method_cache() + def create_instance_context(self, class_context, node): + new = node + while True: + func_node = new + new = search_ancestor(new, 'funcdef', 'classdef') + if class_context.tree_node is new: + func = FunctionValue.from_context(class_context, func_node) + bound_method = BoundMethod(self, class_context, func) + if func_node.name.value == '__init__': + context = bound_method.as_context(self._arguments) + else: + context = bound_method.as_context() + break + return context.create_context(node) + + def py__getattribute__alternatives(self, string_name): + ''' + Since nothing was inferred, now check the __getattr__ and + __getattribute__ methods. Stubs don't need to be checked, because + they don't contain any logic. + ''' + if self.is_stub(): + return NO_VALUES + + name = compiled.create_simple_object(self.inference_state, string_name) + + # This is a little bit special. `__getattribute__` is in Python + # executed before `__getattr__`. But: I know no use case, where + # this could be practical and where Jedi would return wrong types. + # If you ever find something, let me know! + # We are inversing this, because a hand-crafted `__getattribute__` + # could still call another hand-crafted `__getattr__`, but not the + # other way around. + if is_big_annoying_library(self.parent_context): + return NO_VALUES + names = (self.get_function_slot_names('__getattr__') + or self.get_function_slot_names('__getattribute__')) + return self.execute_function_slots(names, name) + + def py__next__(self, contextualized_node=None): + name = u'__next__' + next_slot_names = self.get_function_slot_names(name) + if next_slot_names: + yield LazyKnownValues( + self.execute_function_slots(next_slot_names) + ) + else: + debug.warning('Instance has no __next__ function in %s.', self) + + def py__call__(self, arguments): + names = self.get_function_slot_names('__call__') + if not names: + # Means the Instance is not callable. 
+ return super().py__call__(arguments) + + return ValueSet.from_sets(name.infer().execute(arguments) for name in names) + + def py__get__(self, instance, class_value): + """ + obj may be None. + """ + # Arguments in __get__ descriptors are obj, class. + # `method` is the new parent of the array, don't know if that's good. + for cls in self.class_value.py__mro__(): + result = cls.py__get__on_class(self, instance, class_value) + if result is not NotImplemented: + return result + + names = self.get_function_slot_names('__get__') + if names: + if instance is None: + instance = compiled.builtin_from_name(self.inference_state, 'None') + return self.execute_function_slots(names, instance, class_value) + else: + return ValueSet([self]) + + +class TreeInstance(_BaseTreeInstance): + def __init__(self, inference_state, parent_context, class_value, arguments): + # I don't think that dynamic append lookups should happen here. That + # sounds more like something that should go to py__iter__. + if class_value.py__name__() in ['list', 'set'] \ + and parent_context.get_root_context().is_builtins_module(): + # compare the module path with the builtin name. + if settings.dynamic_array_additions: + arguments = get_dynamic_array_instance(self, arguments) + + super().__init__(inference_state, parent_context, class_value) + self._arguments = arguments + self.tree_node = class_value.tree_node + + # This can recurse, if the initialization of the class includes a reference + # to itself. + @inference_state_method_cache(default=None) + def _get_annotated_class_object(self): + from jedi.inference.gradual.annotation import py__annotations__, \ + infer_type_vars_for_execution + + args = InstanceArguments(self, self._arguments) + for signature in self.class_value.py__getattribute__('__init__').get_signatures(): + # Just take the first result, it should always be one, because we + # control the typeshed code. 
+ funcdef = signature.value.tree_node + if funcdef is None or funcdef.type != 'funcdef' \ + or not signature.matches_signature(args): + # First check if the signature even matches, if not we don't + # need to infer anything. + continue + bound_method = BoundMethod(self, self.class_value.as_context(), signature.value) + all_annotations = py__annotations__(funcdef) + type_var_dict = infer_type_vars_for_execution(bound_method, args, all_annotations) + if type_var_dict: + defined, = self.class_value.define_generics( + infer_type_vars_for_execution(signature.value, args, all_annotations), + ) + debug.dbg('Inferred instance value as %s', defined, color='BLUE') + return defined + return None + + def get_annotated_class_object(self): + return self._get_annotated_class_object() or self.class_value + + def get_key_values(self): + values = NO_VALUES + if self.array_type == 'dict': + for i, (key, instance) in enumerate(self._arguments.unpack()): + if key is None and i == 0: + values |= ValueSet.from_sets( + v.get_key_values() + for v in instance.infer() + if v.array_type == 'dict' + ) + if key: + values |= ValueSet([compiled.create_simple_object( + self.inference_state, + key, + )]) + + return values + + def py__simple_getitem__(self, index): + if self.array_type == 'dict': + # Logic for dict({'foo': bar}) and dict(foo=bar) + # reversed, because: + # >>> dict({'a': 1}, a=3) + # {'a': 3} + # TODO tuple initializations + # >>> dict([('a', 4)]) + # {'a': 4} + for key, lazy_context in reversed(list(self._arguments.unpack())): + if key is None: + values = ValueSet.from_sets( + dct_value.py__simple_getitem__(index) + for dct_value in lazy_context.infer() + if dct_value.array_type == 'dict' + ) + if values: + return values + else: + if key == index: + return lazy_context.infer() + return super().py__simple_getitem__(index) + + def __repr__(self): + return "<%s of %s(%s)>" % (self.__class__.__name__, self.class_value, + self._arguments) + + +class AnonymousInstance(_BaseTreeInstance): 
+ _arguments = None + + +class CompiledInstanceName(NameWrapper): + @iterator_to_value_set + def infer(self): + for result_value in self._wrapped_name.infer(): + if result_value.api_type == 'function': + yield CompiledBoundMethod(result_value) + else: + yield result_value + + +class CompiledInstanceClassFilter(AbstractFilter): + def __init__(self, instance, f): + self._instance = instance + self._class_filter = f + + def get(self, name): + return self._convert(self._class_filter.get(name)) + + def values(self): + return self._convert(self._class_filter.values()) + + def _convert(self, names): + return [CompiledInstanceName(n) for n in names] + + +class BoundMethod(FunctionMixin, ValueWrapper): + def __init__(self, instance, class_context, function): + super().__init__(function) + self.instance = instance + self._class_context = class_context + + def is_bound_method(self): + return True + + @property + def name(self): + return FunctionNameInClass( + self._class_context, + super().name + ) + + def py__class__(self): + c, = values_from_qualified_names(self.inference_state, 'types', 'MethodType') + return c + + def _get_arguments(self, arguments): + assert arguments is not None + return InstanceArguments(self.instance, arguments) + + def _as_context(self, arguments=None): + if arguments is None: + return AnonymousMethodExecutionContext(self.instance, self) + + arguments = self._get_arguments(arguments) + return MethodExecutionContext(self.instance, self, arguments) + + def py__call__(self, arguments): + if isinstance(self._wrapped_value, OverloadedFunctionValue): + return self._wrapped_value.py__call__(self._get_arguments(arguments)) + + function_execution = self.as_context(arguments) + return function_execution.infer() + + def get_signature_functions(self): + return [ + BoundMethod(self.instance, self._class_context, f) + for f in self._wrapped_value.get_signature_functions() + ] + + def get_signatures(self): + return [sig.bind(self) for sig in 
super().get_signatures()] + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self._wrapped_value) + + +class CompiledBoundMethod(ValueWrapper): + def is_bound_method(self): + return True + + def get_signatures(self): + return [sig.bind(self) for sig in self._wrapped_value.get_signatures()] + + +class SelfName(TreeNameDefinition): + """ + This name calculates the parent_context lazily. + """ + def __init__(self, instance, class_context, tree_name): + self._instance = instance + self.class_context = class_context + self.tree_name = tree_name + + @property + def parent_context(self): + return self._instance.create_instance_context(self.class_context, self.tree_name) + + def get_defining_qualified_value(self): + return self._instance + + def infer(self): + stmt = search_ancestor(self.tree_name, 'expr_stmt') + if stmt is not None: + if stmt.children[1].type == "annassign": + from jedi.inference.gradual.annotation import infer_annotation + values = infer_annotation( + self.parent_context, stmt.children[1].children[1] + ).execute_annotation() + if values: + return values + return super().infer() + + +class LazyInstanceClassName(NameWrapper): + def __init__(self, instance, class_member_name): + super().__init__(class_member_name) + self._instance = instance + + @iterator_to_value_set + def infer(self): + for result_value in self._wrapped_name.infer(): + yield from result_value.py__get__(self._instance, self._instance.py__class__()) + + def get_signatures(self): + return self.infer().get_signatures() + + def get_defining_qualified_value(self): + return self._instance + + +class InstanceClassFilter(AbstractFilter): + """ + This filter is special in that it uses the class filter and wraps the + resulting names in LazyInstanceClassName. The idea is that the class name + filtering can be very flexible and always be reflected in instances. 
+ """ + def __init__(self, instance, class_filter): + self._instance = instance + self._class_filter = class_filter + + def get(self, name): + return self._convert(self._class_filter.get(name)) + + def values(self): + return self._convert(self._class_filter.values()) + + def _convert(self, names): + return [ + LazyInstanceClassName(self._instance, n) + for n in names + ] + + def __repr__(self): + return '<%s for %s>' % (self.__class__.__name__, self._class_filter) + + +class SelfAttributeFilter(ClassFilter): + """ + This class basically filters all the use cases where `self.*` was assigned. + """ + def __init__(self, instance, instance_class, node_context, origin_scope): + super().__init__( + class_value=instance_class, + node_context=node_context, + origin_scope=origin_scope, + is_instance=True, + ) + self._instance = instance + + def _filter(self, names): + start, end = self._parser_scope.start_pos, self._parser_scope.end_pos + names = [n for n in names if start < n.start_pos < end] + return self._filter_self_names(names) + + def _filter_self_names(self, names): + for name in names: + trailer = name.parent + if trailer.type == 'trailer' \ + and len(trailer.parent.children) == 2 \ + and trailer.children[0] == '.': + if name.is_definition() and self._access_possible(name): + # TODO filter non-self assignments instead of this bad + # filter. 
+ if self._is_in_right_scope(trailer.parent.children[0], name): + yield name + + def _is_in_right_scope(self, self_name, name): + self_context = self._node_context.create_context(self_name) + names = self_context.goto(self_name, position=self_name.start_pos) + return any( + n.api_type == 'param' + and n.tree_name.get_definition().position_index == 0 + and n.parent_context.tree_node is self._parser_scope + for n in names + ) + + def _convert_names(self, names): + return [SelfName(self._instance, self._node_context, name) for name in names] + + def _check_flows(self, names): + return names + + +class InstanceArguments(TreeArgumentsWrapper): + def __init__(self, instance, arguments): + super().__init__(arguments) + self.instance = instance + + def unpack(self, func=None): + yield None, LazyKnownValue(self.instance) + yield from self._wrapped_arguments.unpack(func) diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/value/iterable.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/value/iterable.py new file mode 100644 index 000000000..7cc37173d --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/value/iterable.py @@ -0,0 +1,647 @@ +""" +Contains all classes and functions to deal with lists, dicts, generators and +iterators in general. 
+""" +from jedi.inference import compiled +from jedi.inference import analysis +from jedi.inference.lazy_value import LazyKnownValue, LazyKnownValues, \ + LazyTreeValue +from jedi.inference.helpers import get_int_or_none, is_string, \ + reraise_getitem_errors, SimpleGetItemNotFound +from jedi.inference.utils import safe_property, to_list +from jedi.inference.cache import inference_state_method_cache +from jedi.inference.filters import LazyAttributeOverwrite, publish_method +from jedi.inference.base_value import ValueSet, Value, NO_VALUES, \ + ContextualizedNode, iterate_values, sentinel, \ + LazyValueWrapper +from jedi.parser_utils import get_sync_comp_fors +from jedi.inference.context import CompForContext +from jedi.inference.value.dynamic_arrays import check_array_additions + + +class IterableMixin: + def py__next__(self, contextualized_node=None): + return self.py__iter__(contextualized_node) + + def py__stop_iteration_returns(self): + return ValueSet([compiled.builtin_from_name(self.inference_state, 'None')]) + + # At the moment, safe values are simple values like "foo", 1 and not + # lists/dicts. Therefore as a small speed optimization we can just do the + # default instead of resolving the lazy wrapped values, that are just + # doing this in the end as well. + # This mostly speeds up patterns like `sys.version_info >= (3, 0)` in + # typeshed. 
+ get_safe_value = Value.get_safe_value + + +class GeneratorBase(LazyAttributeOverwrite, IterableMixin): + array_type = None + + def _get_wrapped_value(self): + instance, = self._get_cls().execute_annotation() + return instance + + def _get_cls(self): + generator, = self.inference_state.typing_module.py__getattribute__('Generator') + return generator + + def py__bool__(self): + return True + + @publish_method('__iter__') + def _iter(self, arguments): + return ValueSet([self]) + + @publish_method('send') + @publish_method('__next__') + def _next(self, arguments): + return ValueSet.from_sets(lazy_value.infer() for lazy_value in self.py__iter__()) + + def py__stop_iteration_returns(self): + return ValueSet([compiled.builtin_from_name(self.inference_state, 'None')]) + + @property + def name(self): + return compiled.CompiledValueName(self, 'Generator') + + def get_annotated_class_object(self): + from jedi.inference.gradual.generics import TupleGenericManager + gen_values = self.merge_types_of_iterate().py__class__() + gm = TupleGenericManager((gen_values, NO_VALUES, NO_VALUES)) + return self._get_cls().with_generics(gm) + + +class Generator(GeneratorBase): + """Handling of `yield` functions.""" + def __init__(self, inference_state, func_execution_context): + super().__init__(inference_state) + self._func_execution_context = func_execution_context + + def py__iter__(self, contextualized_node=None): + iterators = self._func_execution_context.infer_annotations() + if iterators: + return iterators.iterate(contextualized_node) + return self._func_execution_context.get_yield_lazy_values() + + def py__stop_iteration_returns(self): + return self._func_execution_context.get_return_values() + + def __repr__(self): + return "<%s of %s>" % (type(self).__name__, self._func_execution_context) + + +def comprehension_from_atom(inference_state, value, atom): + bracket = atom.children[0] + test_list_comp = atom.children[1] + + if bracket == '{': + if atom.children[1].children[1] == ':': 
+ sync_comp_for = test_list_comp.children[3] + if sync_comp_for.type == 'comp_for': + sync_comp_for = sync_comp_for.children[1] + + return DictComprehension( + inference_state, + value, + sync_comp_for_node=sync_comp_for, + key_node=test_list_comp.children[0], + value_node=test_list_comp.children[2], + ) + else: + cls = SetComprehension + elif bracket == '(': + cls = GeneratorComprehension + elif bracket == '[': + cls = ListComprehension + + sync_comp_for = test_list_comp.children[1] + if sync_comp_for.type == 'comp_for': + sync_comp_for = sync_comp_for.children[1] + + return cls( + inference_state, + defining_context=value, + sync_comp_for_node=sync_comp_for, + entry_node=test_list_comp.children[0], + ) + + +class ComprehensionMixin: + @inference_state_method_cache() + def _get_comp_for_context(self, parent_context, comp_for): + return CompForContext(parent_context, comp_for) + + def _nested(self, comp_fors, parent_context=None): + comp_for = comp_fors[0] + + is_async = comp_for.parent.type == 'comp_for' + + input_node = comp_for.children[3] + parent_context = parent_context or self._defining_context + input_types = parent_context.infer_node(input_node) + + cn = ContextualizedNode(parent_context, input_node) + iterated = input_types.iterate(cn, is_async=is_async) + exprlist = comp_for.children[1] + for i, lazy_value in enumerate(iterated): + types = lazy_value.infer() + dct = unpack_tuple_to_dict(parent_context, types, exprlist) + context = self._get_comp_for_context( + parent_context, + comp_for, + ) + with context.predefine_names(comp_for, dct): + try: + yield from self._nested(comp_fors[1:], context) + except IndexError: + iterated = context.infer_node(self._entry_node) + if self.array_type == 'dict': + yield iterated, context.infer_node(self._value_node) + else: + yield iterated + + @inference_state_method_cache(default=[]) + @to_list + def _iterate(self): + comp_fors = tuple(get_sync_comp_fors(self._sync_comp_for_node)) + yield from self._nested(comp_fors) + 
+ def py__iter__(self, contextualized_node=None): + for set_ in self._iterate(): + yield LazyKnownValues(set_) + + def __repr__(self): + return "<%s of %s>" % (type(self).__name__, self._sync_comp_for_node) + + +class _DictMixin: + def _get_generics(self): + return tuple(c_set.py__class__() for c_set in self.get_mapping_item_values()) + + +class Sequence(LazyAttributeOverwrite, IterableMixin): + api_type = 'instance' + + @property + def name(self): + return compiled.CompiledValueName(self, self.array_type) + + def _get_generics(self): + return (self.merge_types_of_iterate().py__class__(),) + + @inference_state_method_cache(default=()) + def _cached_generics(self): + return self._get_generics() + + def _get_wrapped_value(self): + from jedi.inference.gradual.base import GenericClass + from jedi.inference.gradual.generics import TupleGenericManager + klass = compiled.builtin_from_name(self.inference_state, self.array_type) + c, = GenericClass( + klass, + TupleGenericManager(self._cached_generics()) + ).execute_annotation() + return c + + def py__bool__(self): + return None # We don't know the length, because of appends. 
+ + @safe_property + def parent(self): + return self.inference_state.builtins_module + + def py__getitem__(self, index_value_set, contextualized_node): + if self.array_type == 'dict': + return self._dict_values() + return iterate_values(ValueSet([self])) + + +class _BaseComprehension(ComprehensionMixin): + def __init__(self, inference_state, defining_context, sync_comp_for_node, entry_node): + assert sync_comp_for_node.type == 'sync_comp_for' + super().__init__(inference_state) + self._defining_context = defining_context + self._sync_comp_for_node = sync_comp_for_node + self._entry_node = entry_node + + +class ListComprehension(_BaseComprehension, Sequence): + array_type = 'list' + + def py__simple_getitem__(self, index): + if isinstance(index, slice): + return ValueSet([self]) + + all_types = list(self.py__iter__()) + with reraise_getitem_errors(IndexError, TypeError): + lazy_value = all_types[index] + return lazy_value.infer() + + +class SetComprehension(_BaseComprehension, Sequence): + array_type = 'set' + + +class GeneratorComprehension(_BaseComprehension, GeneratorBase): + pass + + +class _DictKeyMixin: + # TODO merge with _DictMixin? + def get_mapping_item_values(self): + return self._dict_keys(), self._dict_values() + + def get_key_values(self): + # TODO merge with _dict_keys? 
+ return self._dict_keys() + + +class DictComprehension(ComprehensionMixin, Sequence, _DictKeyMixin): + array_type = 'dict' + + def __init__(self, inference_state, defining_context, sync_comp_for_node, key_node, value_node): + assert sync_comp_for_node.type == 'sync_comp_for' + super().__init__(inference_state) + self._defining_context = defining_context + self._sync_comp_for_node = sync_comp_for_node + self._entry_node = key_node + self._value_node = value_node + + def py__iter__(self, contextualized_node=None): + for keys, values in self._iterate(): + yield LazyKnownValues(keys) + + def py__simple_getitem__(self, index): + for keys, values in self._iterate(): + for k in keys: + # Be careful in the future if refactoring, index could be a + # slice object. + if k.get_safe_value(default=object()) == index: + return values + raise SimpleGetItemNotFound() + + def _dict_keys(self): + return ValueSet.from_sets(keys for keys, values in self._iterate()) + + def _dict_values(self): + return ValueSet.from_sets(values for keys, values in self._iterate()) + + @publish_method('values') + def _imitate_values(self, arguments): + lazy_value = LazyKnownValues(self._dict_values()) + return ValueSet([FakeList(self.inference_state, [lazy_value])]) + + @publish_method('items') + def _imitate_items(self, arguments): + lazy_values = [ + LazyKnownValue( + FakeTuple( + self.inference_state, + [LazyKnownValues(key), + LazyKnownValues(value)] + ) + ) + for key, value in self._iterate() + ] + + return ValueSet([FakeList(self.inference_state, lazy_values)]) + + def exact_key_items(self): + # NOTE: A smarter thing can probably done here to achieve better + # completions, but at least like this jedi doesn't crash + return [] + + +class SequenceLiteralValue(Sequence): + _TUPLE_LIKE = 'testlist_star_expr', 'testlist', 'subscriptlist' + mapping = {'(': 'tuple', + '[': 'list', + '{': 'set'} + + def __init__(self, inference_state, defining_context, atom): + super().__init__(inference_state) + 
self.atom = atom + self._defining_context = defining_context + + if self.atom.type in self._TUPLE_LIKE: + self.array_type = 'tuple' + else: + self.array_type = SequenceLiteralValue.mapping[atom.children[0]] + """The builtin name of the array (list, set, tuple or dict).""" + + def _get_generics(self): + if self.array_type == 'tuple': + return tuple(x.infer().py__class__() for x in self.py__iter__()) + return super()._get_generics() + + def py__simple_getitem__(self, index): + """Here the index is an int/str. Raises IndexError/KeyError.""" + if isinstance(index, slice): + return ValueSet([self]) + else: + with reraise_getitem_errors(TypeError, KeyError, IndexError): + node = self.get_tree_entries()[index] + if node == ':' or node.type == 'subscript': + return NO_VALUES + return self._defining_context.infer_node(node) + + def py__iter__(self, contextualized_node=None): + """ + While values returns the possible values for any array field, this + function returns the value for a certain index. + """ + for node in self.get_tree_entries(): + if node == ':' or node.type == 'subscript': + # TODO this should probably use at least part of the code + # of infer_subscript_list. + yield LazyKnownValue(Slice(self._defining_context, None, None, None)) + else: + yield LazyTreeValue(self._defining_context, node) + yield from check_array_additions(self._defining_context, self) + + def py__len__(self): + # This function is not really used often. It's more of a try. + return len(self.get_tree_entries()) + + def get_tree_entries(self): + c = self.atom.children + + if self.atom.type in self._TUPLE_LIKE: + return c[::2] + + array_node = c[1] + if array_node in (']', '}', ')'): + return [] # Direct closing bracket, doesn't contain items. 
+ + if array_node.type == 'testlist_comp': + # filter out (for now) pep 448 single-star unpacking + return [value for value in array_node.children[::2] + if value.type != "star_expr"] + elif array_node.type == 'dictorsetmaker': + kv = [] + iterator = iter(array_node.children) + for key in iterator: + if key == "**": + # dict with pep 448 double-star unpacking + # for now ignoring the values imported by ** + next(iterator) + next(iterator, None) # Possible comma. + else: + op = next(iterator, None) + if op is None or op == ',': + if key.type == "star_expr": + # pep 448 single-star unpacking + # for now ignoring values imported by * + pass + else: + kv.append(key) # A set. + else: + assert op == ':' # A dict. + kv.append((key, next(iterator))) + next(iterator, None) # Possible comma. + return kv + else: + if array_node.type == "star_expr": + # pep 448 single-star unpacking + # for now ignoring values imported by * + return [] + else: + return [array_node] + + def __repr__(self): + return "<%s of %s>" % (self.__class__.__name__, self.atom) + + +class DictLiteralValue(_DictMixin, SequenceLiteralValue, _DictKeyMixin): + array_type = 'dict' + + def __init__(self, inference_state, defining_context, atom): + # Intentionally don't call the super class. This is definitely a sign + # that the architecture is bad and we should refactor. + Sequence.__init__(self, inference_state) + self._defining_context = defining_context + self.atom = atom + + def py__simple_getitem__(self, index): + """Here the index is an int/str. Raises IndexError/KeyError.""" + compiled_value_index = compiled.create_simple_object(self.inference_state, index) + for key, value in self.get_tree_entries(): + for k in self._defining_context.infer_node(key): + for key_v in k.execute_operation(compiled_value_index, '=='): + if key_v.get_safe_value(): + return self._defining_context.infer_node(value) + raise SimpleGetItemNotFound('No key found in dictionary %s.' 
% self) + + def py__iter__(self, contextualized_node=None): + """ + While values returns the possible values for any array field, this + function returns the value for a certain index. + """ + # Get keys. + types = NO_VALUES + for k, _ in self.get_tree_entries(): + types |= self._defining_context.infer_node(k) + # We don't know which dict index comes first, therefore always + # yield all the types. + for _ in types: + yield LazyKnownValues(types) + + @publish_method('values') + def _imitate_values(self, arguments): + lazy_value = LazyKnownValues(self._dict_values()) + return ValueSet([FakeList(self.inference_state, [lazy_value])]) + + @publish_method('items') + def _imitate_items(self, arguments): + lazy_values = [ + LazyKnownValue(FakeTuple( + self.inference_state, + (LazyTreeValue(self._defining_context, key_node), + LazyTreeValue(self._defining_context, value_node)) + )) for key_node, value_node in self.get_tree_entries() + ] + + return ValueSet([FakeList(self.inference_state, lazy_values)]) + + def exact_key_items(self): + """ + Returns a generator of tuples like dict.items(), where the key is + resolved (as a string) and the values are still lazy values. 
+ """ + for key_node, value in self.get_tree_entries(): + for key in self._defining_context.infer_node(key_node): + if is_string(key): + yield key.get_safe_value(), LazyTreeValue(self._defining_context, value) + + def _dict_values(self): + return ValueSet.from_sets( + self._defining_context.infer_node(v) + for k, v in self.get_tree_entries() + ) + + def _dict_keys(self): + return ValueSet.from_sets( + self._defining_context.infer_node(k) + for k, v in self.get_tree_entries() + ) + + +class _FakeSequence(Sequence): + def __init__(self, inference_state, lazy_value_list): + """ + type should be one of "tuple", "list" + """ + super().__init__(inference_state) + self._lazy_value_list = lazy_value_list + + def py__simple_getitem__(self, index): + if isinstance(index, slice): + return ValueSet([self]) + + with reraise_getitem_errors(IndexError, TypeError): + lazy_value = self._lazy_value_list[index] + return lazy_value.infer() + + def py__iter__(self, contextualized_node=None): + return self._lazy_value_list + + def py__bool__(self): + return bool(len(self._lazy_value_list)) + + def __repr__(self): + return "<%s of %s>" % (type(self).__name__, self._lazy_value_list) + + +class FakeTuple(_FakeSequence): + array_type = 'tuple' + + +class FakeList(_FakeSequence): + array_type = 'tuple' + + +class FakeDict(_DictMixin, Sequence, _DictKeyMixin): + array_type = 'dict' + + def __init__(self, inference_state, dct): + super().__init__(inference_state) + self._dct = dct + + def py__iter__(self, contextualized_node=None): + for key in self._dct: + yield LazyKnownValue(compiled.create_simple_object(self.inference_state, key)) + + def py__simple_getitem__(self, index): + with reraise_getitem_errors(KeyError, TypeError): + lazy_value = self._dct[index] + return lazy_value.infer() + + @publish_method('values') + def _values(self, arguments): + return ValueSet([FakeTuple( + self.inference_state, + [LazyKnownValues(self._dict_values())] + )]) + + def _dict_values(self): + return 
ValueSet.from_sets(lazy_value.infer() for lazy_value in self._dct.values()) + + def _dict_keys(self): + return ValueSet.from_sets(lazy_value.infer() for lazy_value in self.py__iter__()) + + def exact_key_items(self): + return self._dct.items() + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self._dct) + + +class MergedArray(Sequence): + def __init__(self, inference_state, arrays): + super().__init__(inference_state) + self.array_type = arrays[-1].array_type + self._arrays = arrays + + def py__iter__(self, contextualized_node=None): + for array in self._arrays: + yield from array.py__iter__() + + def py__simple_getitem__(self, index): + return ValueSet.from_sets(lazy_value.infer() for lazy_value in self.py__iter__()) + + +def unpack_tuple_to_dict(context, types, exprlist): + """ + Unpacking tuple assignments in for statements and expr_stmts. + """ + if exprlist.type == 'name': + return {exprlist.value: types} + elif exprlist.type == 'atom' and exprlist.children[0] in ('(', '['): + return unpack_tuple_to_dict(context, types, exprlist.children[1]) + elif exprlist.type in ('testlist', 'testlist_comp', 'exprlist', + 'testlist_star_expr'): + dct = {} + parts = iter(exprlist.children[::2]) + n = 0 + for lazy_value in types.iterate(ContextualizedNode(context, exprlist)): + n += 1 + try: + part = next(parts) + except StopIteration: + analysis.add(context, 'value-error-too-many-values', part, + message="ValueError: too many values to unpack (expected %s)" % n) + else: + dct.update(unpack_tuple_to_dict(context, lazy_value.infer(), part)) + has_parts = next(parts, None) + if types and has_parts is not None: + analysis.add(context, 'value-error-too-few-values', has_parts, + message="ValueError: need more than %s values to unpack" % n) + return dct + elif exprlist.type == 'power' or exprlist.type == 'atom_expr': + # Something like ``arr[x], var = ...``. + # This is something that is not yet supported, would also be difficult + # to write into a dict. 
+ return {} + elif exprlist.type == 'star_expr': # `a, *b, c = x` type unpackings + # Currently we're not supporting them. + return {} + raise NotImplementedError + + +class Slice(LazyValueWrapper): + def __init__(self, python_context, start, stop, step): + self.inference_state = python_context.inference_state + self._context = python_context + # All of them are either a Precedence or None. + self._start = start + self._stop = stop + self._step = step + + def _get_wrapped_value(self): + value = compiled.builtin_from_name(self._context.inference_state, 'slice') + slice_value, = value.execute_with_values() + return slice_value + + def get_safe_value(self, default=sentinel): + """ + Imitate CompiledValue.obj behavior and return a ``builtin.slice()`` + object. + """ + def get(element): + if element is None: + return None + + result = self._context.infer_node(element) + if len(result) != 1: + # For simplicity, we want slices to be clear defined with just + # one type. Otherwise we will return an empty slice object. + raise IndexError + + value, = result + return get_int_or_none(value) + + try: + return slice(get(self._start), get(self._stop), get(self._step)) + except IndexError: + return slice(None, None, None) diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/value/klass.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/value/klass.py new file mode 100644 index 000000000..ee8d4e850 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/value/klass.py @@ -0,0 +1,403 @@ +""" +Like described in the :mod:`parso.python.tree` module, +there's a need for an ast like module to represent the states of parsed +modules. + +But now there are also structures in Python that need a little bit more than +that. An ``Instance`` for example is only a ``Class`` before it is +instantiated. This class represents these cases. + +So, why is there also a ``Class`` class here? Well, there are decorators and +they change classes in Python 3. 
+ +Representation modules also define "magic methods". Those methods look like +``py__foo__`` and are typically mappable to the Python equivalents ``__call__`` +and others. Here's a list: + +====================================== ======================================== +**Method** **Description** +-------------------------------------- ---------------------------------------- +py__call__(arguments: Array) On callable objects, returns types. +py__bool__() Returns True/False/None; None means that + there's no certainty. +py__bases__() Returns a list of base classes. +py__iter__() Returns a generator of a set of types. +py__class__() Returns the class of an instance. +py__simple_getitem__(index: int/str) Returns a a set of types of the index. + Can raise an IndexError/KeyError. +py__getitem__(indexes: ValueSet) Returns a a set of types of the index. +py__file__() Only on modules. Returns None if does + not exist. +py__package__() -> List[str] Only on modules. For the import system. +py__path__() Only on modules. For the import system. +py__get__(call_object) Only on instances. Simulates + descriptors. +py__doc__() Returns the docstring for a value. 
+====================================== ======================================== + +""" +from jedi import debug +from jedi.parser_utils import get_cached_parent_scope, expr_is_dotted, \ + function_is_property +from jedi.inference.cache import inference_state_method_cache, CachedMetaClass, \ + inference_state_method_generator_cache +from jedi.inference import compiled +from jedi.inference.lazy_value import LazyKnownValues, LazyTreeValue +from jedi.inference.filters import ParserTreeFilter +from jedi.inference.names import TreeNameDefinition, ValueName +from jedi.inference.arguments import unpack_arglist, ValuesArguments +from jedi.inference.base_value import ValueSet, iterator_to_value_set, \ + NO_VALUES +from jedi.inference.context import ClassContext +from jedi.inference.value.function import FunctionAndClassBase +from jedi.inference.gradual.generics import LazyGenericManager, TupleGenericManager +from jedi.plugins import plugin_manager + + +class ClassName(TreeNameDefinition): + def __init__(self, class_value, tree_name, name_context, apply_decorators): + super().__init__(name_context, tree_name) + self._apply_decorators = apply_decorators + self._class_value = class_value + + @iterator_to_value_set + def infer(self): + # We're using a different value to infer, so we cannot call super(). + from jedi.inference.syntax_tree import tree_name_to_values + inferred = tree_name_to_values( + self.parent_context.inference_state, self.parent_context, self.tree_name) + + for result_value in inferred: + if self._apply_decorators: + yield from result_value.py__get__(instance=None, class_value=self._class_value) + else: + yield result_value + + @property + def api_type(self): + type_ = super().api_type + if type_ == 'function': + definition = self.tree_name.get_definition() + if function_is_property(definition): + # This essentially checks if there is an @property before + # the function. 
@property could be something different, but + # any programmer that redefines property as something that + # is not really a property anymore, should be shot. (i.e. + # this is a heuristic). + return 'property' + return type_ + + +class ClassFilter(ParserTreeFilter): + def __init__(self, class_value, node_context=None, until_position=None, + origin_scope=None, is_instance=False): + super().__init__( + class_value.as_context(), node_context, + until_position=until_position, + origin_scope=origin_scope, + ) + self._class_value = class_value + self._is_instance = is_instance + + def _convert_names(self, names): + return [ + ClassName( + class_value=self._class_value, + tree_name=name, + name_context=self._node_context, + apply_decorators=not self._is_instance, + ) for name in names + ] + + def _equals_origin_scope(self): + node = self._origin_scope + while node is not None: + if node == self._parser_scope or node == self.parent_context: + return True + node = get_cached_parent_scope(self._parso_cache_node, node) + return False + + def _access_possible(self, name): + # Filter for ClassVar variables + # TODO this is not properly done, yet. It just checks for the string + # ClassVar in the annotation, which can be quite imprecise. If we + # wanted to do this correct, we would have to infer the ClassVar. + if not self._is_instance: + expr_stmt = name.get_definition() + if expr_stmt is not None and expr_stmt.type == 'expr_stmt': + annassign = expr_stmt.children[1] + if annassign.type == 'annassign': + # If there is an =, the variable is obviously also + # defined on the class. 
+ if 'ClassVar' not in annassign.children[1].get_code() \ + and '=' not in annassign.children: + return False + + # Filter for name mangling of private variables like __foo + return not name.value.startswith('__') or name.value.endswith('__') \ + or self._equals_origin_scope() + + def _filter(self, names): + names = super()._filter(names) + return [name for name in names if self._access_possible(name)] + + +class ClassMixin: + def is_class(self): + return True + + def is_class_mixin(self): + return True + + def py__call__(self, arguments): + from jedi.inference.value import TreeInstance + + from jedi.inference.gradual.typing import TypedDict + if self.is_typeddict(): + return ValueSet([TypedDict(self)]) + return ValueSet([TreeInstance(self.inference_state, self.parent_context, self, arguments)]) + + def py__class__(self): + return compiled.builtin_from_name(self.inference_state, 'type') + + @property + def name(self): + return ValueName(self, self.tree_node.name) + + def py__name__(self): + return self.name.string_name + + @inference_state_method_generator_cache() + def py__mro__(self): + mro = [self] + yield self + # TODO Do a proper mro resolution. Currently we are just listing + # classes. However, it's a complicated algorithm. + for lazy_cls in self.py__bases__(): + # TODO there's multiple different mro paths possible if this yields + # multiple possibilities. Could be changed to be more correct. + for cls in lazy_cls.infer(): + # TODO detect for TypeError: duplicate base class str, + # e.g. 
`class X(str, str): pass` + try: + mro_method = cls.py__mro__ + except AttributeError: + # TODO add a TypeError like: + """ + >>> class Y(lambda: test): pass + Traceback (most recent call last): + File "", line 1, in + TypeError: function() argument 1 must be code, not str + >>> class Y(1): pass + Traceback (most recent call last): + File "", line 1, in + TypeError: int() takes at most 2 arguments (3 given) + """ + debug.warning('Super class of %s is not a class: %s', self, cls) + else: + for cls_new in mro_method(): + if cls_new not in mro: + mro.append(cls_new) + yield cls_new + + def get_filters(self, origin_scope=None, is_instance=False, + include_metaclasses=True, include_type_when_class=True): + if include_metaclasses: + metaclasses = self.get_metaclasses() + if metaclasses: + yield from self.get_metaclass_filters(metaclasses, is_instance) + + for cls in self.py__mro__(): + if cls.is_compiled(): + yield from cls.get_filters(is_instance=is_instance) + else: + yield ClassFilter( + self, node_context=cls.as_context(), + origin_scope=origin_scope, + is_instance=is_instance + ) + if not is_instance and include_type_when_class: + from jedi.inference.compiled import builtin_from_name + type_ = builtin_from_name(self.inference_state, 'type') + assert isinstance(type_, ClassValue) + if type_ != self: + # We are not using execute_with_values here, because the + # plugin function for type would get executed instead of an + # instance creation. + args = ValuesArguments([]) + for instance in type_.py__call__(args): + instance_filters = instance.get_filters() + # Filter out self filters + next(instance_filters, None) + next(instance_filters, None) + x = next(instance_filters, None) + assert x is not None + yield x + + def get_signatures(self): + # Since calling staticmethod without a function is illegal, the Jedi + # plugin doesn't return anything. Therefore call directly and get what + # we want: An instance of staticmethod. 
+ metaclasses = self.get_metaclasses() + if metaclasses: + sigs = self.get_metaclass_signatures(metaclasses) + if sigs: + return sigs + args = ValuesArguments([]) + init_funcs = self.py__call__(args).py__getattribute__('__init__') + return [sig.bind(self) for sig in init_funcs.get_signatures()] + + def _as_context(self): + return ClassContext(self) + + def get_type_hint(self, add_class_info=True): + if add_class_info: + return 'Type[%s]' % self.py__name__() + return self.py__name__() + + @inference_state_method_cache(default=False) + def is_typeddict(self): + # TODO Do a proper mro resolution. Currently we are just listing + # classes. However, it's a complicated algorithm. + from jedi.inference.gradual.typing import TypedDictClass + for lazy_cls in self.py__bases__(): + if not isinstance(lazy_cls, LazyTreeValue): + return False + tree_node = lazy_cls.data + # Only resolve simple classes, stuff like Iterable[str] are more + # intensive to resolve and if generics are involved, we know it's + # not a TypedDict. + if not expr_is_dotted(tree_node): + return False + + for cls in lazy_cls.infer(): + if isinstance(cls, TypedDictClass): + return True + try: + method = cls.is_typeddict + except AttributeError: + # We're only dealing with simple classes, so just returning + # here should be fine. This only happens with e.g. compiled + # classes. + return False + else: + if method(): + return True + return False + + def py__getitem__(self, index_value_set, contextualized_node): + from jedi.inference.gradual.base import GenericClass + if not index_value_set: + debug.warning('Class indexes inferred to nothing. 
Returning class instead')
            return ValueSet([self])
        return ValueSet(
            GenericClass(
                self,
                LazyGenericManager(
                    context_of_index=contextualized_node.context,
                    index_value=index_value,
                )
            )
            for index_value in index_value_set
        )

    def with_generics(self, generics_tuple):
        # Build a parameterized version of this class with the given generics
        # already resolved (unlike py__getitem__, which resolves lazily).
        from jedi.inference.gradual.base import GenericClass
        return GenericClass(
            self,
            TupleGenericManager(generics_tuple)
        )

    def define_generics(self, type_var_dict):
        from jedi.inference.gradual.base import GenericClass

        def remap_type_vars():
            """
            The TypeVars in the resulting classes have sometimes different names
            and we need to check for that, e.g. a signature can be:

            def iter(iterable: Iterable[_T]) -> Iterator[_T]: ...

            However, the iterator is defined as Iterator[_T_co], which means it has
            a different type var name.
            """
            for type_var in self.list_type_vars():
                yield type_var_dict.get(type_var.py__name__(), NO_VALUES)

        if type_var_dict:
            return ValueSet([GenericClass(
                self,
                TupleGenericManager(tuple(remap_type_vars()))
            )])
        return ValueSet({self})


class ClassValue(ClassMixin, FunctionAndClassBase, metaclass=CachedMetaClass):
    """A class defined by a ``classdef`` tree node."""

    api_type = 'class'

    @inference_state_method_cache()
    def list_type_vars(self):
        # Collect the TypeVars used in the base-class list, in source order.
        found = []
        arglist = self.tree_node.get_super_arglist()
        if arglist is None:
            return []

        for stars, node in unpack_arglist(arglist):
            if stars:
                continue  # These are not relevant for this search.

            from jedi.inference.gradual.annotation import find_unknown_type_vars
            for type_var in find_unknown_type_vars(self.parent_context, node):
                if type_var not in found:
                    # The order matters and it's therefore a list.
                    found.append(type_var)
        return found

    def _get_bases_arguments(self):
        # Returns the argument list of the class header (bases + keyword
        # arguments such as `metaclass=...`), or None when there is none.
        arglist = self.tree_node.get_super_arglist()
        if arglist:
            from jedi.inference import arguments
            return arguments.TreeArguments(self.inference_state, self.parent_context, arglist)
        return None

    @inference_state_method_cache(default=())
    def py__bases__(self):
        args = self._get_bases_arguments()
        if args is not None:
            # Positional arguments only; keyword args (metaclass=...) are not bases.
            lst = [value for key, value in args.unpack() if key is None]
            if lst:
                return lst

        # Everything except builtins.object implicitly inherits from object.
        if self.py__name__() == 'object' \
                and self.parent_context.is_builtins_module():
            return []
        return [LazyKnownValues(
            self.inference_state.builtins_module.py__getattribute__('object')
        )]

    @plugin_manager.decorate()
    def get_metaclass_filters(self, metaclasses, is_instance):
        # Default implementation; plugins (e.g. the Django plugin) override
        # this via the plugin manager.
        debug.warning('Unprocessed metaclass %s', metaclasses)
        return []

    @inference_state_method_cache(default=NO_VALUES)
    def get_metaclasses(self):
        args = self._get_bases_arguments()
        if args is not None:
            m = [value for key, value in args.unpack() if key == 'metaclass']
            metaclasses = ValueSet.from_sets(lazy_value.infer() for lazy_value in m)
            metaclasses = ValueSet(m for m in metaclasses if m.is_class())
            if metaclasses:
                return metaclasses

        # No explicit metaclass: the first base (depth-first) that has one wins.
        for lazy_base in self.py__bases__():
            for value in lazy_base.infer():
                if value.is_class():
                    values = value.get_metaclasses()
                    if values:
                        return values
        return NO_VALUES

    @plugin_manager.decorate()
    def get_metaclass_signatures(self, metaclasses):
        # Default implementation; overridden by plugins via the plugin manager.
        return []
diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/value/module.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/value/module.py
new file mode 100644
index 000000000..6461cb4bc
--- /dev/null
+++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/value/module.py
@@ -0,0 +1,230 @@
import os
from pathlib import Path
from typing import Optional

from jedi.inference.cache import inference_state_method_cache
from jedi.inference.names import AbstractNameDefinition, ModuleName
from jedi.inference.filters import GlobalNameFilter, ParserTreeFilter, DictFilter, MergedFilter
from jedi.inference import compiled
from jedi.inference.base_value import TreeValue
from jedi.inference.names import SubModuleName
from jedi.inference.helpers import values_from_qualified_names
from jedi.inference.compiled import create_simple_object
from jedi.inference.base_value import ValueSet
from jedi.inference.context import ModuleContext


class _ModuleAttributeName(AbstractNameDefinition):
    """
    For module attributes like __file__, __str__ and so on.
    """
    api_type = 'instance'

    def __init__(self, parent_module, string_name, string_value=None):
        self.parent_context = parent_module
        self.string_name = string_name
        # When given, the attribute infers to exactly this string value
        # (used for __file__); otherwise it infers to "some str".
        self._string_value = string_value

    def infer(self):
        if self._string_value is not None:
            s = self._string_value
            return ValueSet([
                create_simple_object(self.parent_context.inference_state, s)
            ])
        return compiled.get_string_value_set(self.parent_context.inference_state)


class SubModuleDictMixin:
    @inference_state_method_cache()
    def sub_modules_dict(self):
        """
        Lists modules in the directory of this module (if this module is a
        package).
        """
        names = {}
        if self.is_package():
            # Module names are enumerated in the compiled subprocess so that
            # importing arbitrary files cannot crash the main process.
            mods = self.inference_state.compiled_subprocess.iter_module_names(
                self.py__path__()
            )
            for name in mods:
                # It's obviously a relative import to the current module.
                names[name] = SubModuleName(self.as_context(), name)

        # In the case of an import like `from x.` we don't need to
        # add all the variables, this is only about submodules.
        return names


class ModuleMixin(SubModuleDictMixin):
    # Shared behavior of source modules and stub modules.
    _module_name_class = ModuleName

    def get_filters(self, origin_scope=None):
        # Lookup order: module-level names (tree + `global` declarations),
        # then submodules, then synthetic attributes (__name__ etc.),
        # then names pulled in via `from x import *`.
        yield MergedFilter(
            ParserTreeFilter(
                parent_context=self.as_context(),
                origin_scope=origin_scope
            ),
            GlobalNameFilter(self.as_context()),
        )
        yield DictFilter(self.sub_modules_dict())
        yield DictFilter(self._module_attributes_dict())
        yield from self.iter_star_filters()

    def py__class__(self):
        c, = values_from_qualified_names(self.inference_state, 'types', 'ModuleType')
        return c

    def is_module(self):
        return True

    def is_stub(self):
        return False

    @property  # type: ignore[misc]
    @inference_state_method_cache()
    def name(self):
        # The last dotted component, e.g. `path` for `os.path`.
        return self._module_name_class(self, self.string_names[-1])

    @inference_state_method_cache()
    def _module_attributes_dict(self):
        names = ['__package__', '__doc__', '__name__']
        # All the additional module attributes are strings.
        dct = dict((n, _ModuleAttributeName(self, n)) for n in names)
        path = self.py__file__()
        if path is not None:
            dct['__file__'] = _ModuleAttributeName(self, '__file__', str(path))
        return dct

    def iter_star_filters(self):
        # Only the first filter of a star-imported module is used: the
        # module-level names, not its submodules/attributes.
        for star_module in self.star_imports():
            f = next(star_module.get_filters(), None)
            assert f is not None
            yield f

    # I'm not sure if the star import cache is really that effective anymore
    # with all the other really fast import caches. Recheck. Also we would need
    # to push the star imports into InferenceState.module_cache, if we reenable this.
    @inference_state_method_cache([])
    def star_imports(self):
        """
        Follow every `from x import *` in this module and return the imported
        modules, including (transitively) their own star imports.
        """
        from jedi.inference.imports import Importer

        modules = []
        module_context = self.as_context()
        for i in self.tree_node.iter_imports():
            if i.is_star_import():
                new = Importer(
                    self.inference_state,
                    import_path=i.get_paths()[-1],
                    module_context=module_context,
                    level=i.level
                ).follow()

                for module in new:
                    if isinstance(module, ModuleValue):
                        modules += module.star_imports()
                modules += new
        return modules

    def get_qualified_names(self):
        """
        A module doesn't have a qualified name, but it's important to note that
        it's reachable and not `None`. With this information we can add
        qualified names on top for all value children.
        """
        return ()


class ModuleValue(ModuleMixin, TreeValue):
    """A module backed by a parsed source file (or source string)."""

    api_type = 'module'

    def __init__(self, inference_state, module_node, code_lines, file_io=None,
                 string_names=None, is_package=False):
        super().__init__(
            inference_state,
            parent_context=None,
            tree_node=module_node
        )
        self.file_io = file_io
        if file_io is None:
            # Modules created from a string (e.g. interactive code) have no path.
            self._path: Optional[Path] = None
        else:
            self._path = file_io.path
        self.string_names = string_names  # Optional[Tuple[str, ...]]
        self.code_lines = code_lines
        self._is_package = is_package

    def is_stub(self):
        if self._path is not None and self._path.suffix == '.pyi':
            # Currently this is the way how we identify stubs when e.g. goto is
            # used in them. This could be changed if stubs would be identified
            # sooner and used as StubModuleValue.
            return True
        return super().is_stub()

    def py__name__(self):
        if self.string_names is None:
            return None
        return '.'.join(self.string_names)

    def py__file__(self) -> Optional[Path]:
        """
        In contrast to Python's __file__ can be None.
        """
        if self._path is None:
            return None

        return self._path.absolute()

    def is_package(self):
        return self._is_package

    def py__package__(self):
        if self.string_names is None:
            return []

        # For a package, __package__ is its own dotted name; for a plain
        # module it is the name of the containing package.
        if self._is_package:
            return self.string_names
        return self.string_names[:-1]

    def py__path__(self):
        """
        In case of a package, this returns Python's __path__ attribute, which
        is a list of paths (strings).
        Returns None if the module is not a package.
        """
        if not self._is_package:
            return None

        # A namespace package is typically auto generated and ~10 lines long.
        first_few_lines = ''.join(self.code_lines[:50])
        # these are strings that need to be used for namespace packages,
        # the first one is ``pkgutil``, the second ``pkg_resources``.
        options = ('declare_namespace(__name__)', 'extend_path(__path__')
        if options[0] in first_few_lines or options[1] in first_few_lines:
            # It is a namespace, now try to find the rest of the
            # modules on sys_path or whatever the search_path is.
            paths = set()
            for s in self.inference_state.get_sys_path():
                other = os.path.join(s, self.name.string_name)
                if os.path.isdir(other):
                    paths.add(other)
            if paths:
                return list(paths)
            # Nested namespace packages will not be supported. Nobody ever
            # asked for it and in Python 3 they are there without using all the
            # crap above.

        # Default to the directory of this file.
        file = self.py__file__()
        assert file is not None  # Shouldn't be a package in the first place.
        return [os.path.dirname(file)]

    def _as_context(self):
        return ModuleContext(self)

    def __repr__(self):
        return "<%s: %s@%s-%s is_stub=%s>" % (
            self.__class__.__name__, self.py__name__(),
            self.tree_node.start_pos[0], self.tree_node.end_pos[0],
            self.is_stub()
        )
diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/inference/value/namespace.py b/bundle/jedi-vim/pythonx/jedi/jedi/inference/value/namespace.py
new file mode 100644
index 000000000..11737cc97
--- /dev/null
+++ b/bundle/jedi-vim/pythonx/jedi/jedi/inference/value/namespace.py
@@ -0,0 +1,74 @@
from pathlib import Path
from typing import Optional

from jedi.inference.cache import inference_state_method_cache
from jedi.inference.filters import DictFilter
from jedi.inference.names import ValueNameMixin, AbstractNameDefinition
from jedi.inference.base_value import Value
from jedi.inference.value.module import SubModuleDictMixin
from jedi.inference.context import NamespaceContext


class ImplicitNSName(ValueNameMixin, AbstractNameDefinition):
    """
    Accessing names for implicit namespace packages should infer to nothing.
    This object will prevent Jedi from raising exceptions
    """
    def __init__(self, implicit_ns_value, string_name):
        self._value = implicit_ns_value
        self.string_name = string_name


class ImplicitNamespaceValue(Value, SubModuleDictMixin):
    """
    Provides support for implicit namespace packages
    """
    api_type = 'namespace'
    parent_context = None

    def __init__(self, inference_state, string_names, paths):
        super().__init__(inference_state, parent_context=None)
        self.inference_state = inference_state
        # Dotted-name parts of the package and the directories contributing
        # to it (a namespace package can span multiple sys.path entries).
        self.string_names = string_names
        self._paths = paths

    def get_filters(self, origin_scope=None):
        # A namespace package has no code of its own — only submodules.
        yield DictFilter(self.sub_modules_dict())

    def get_qualified_names(self):
        return ()

    @property  # type: ignore[misc]
    @inference_state_method_cache()
    def name(self):
        string_name = self.py__package__()[-1]
        return ImplicitNSName(self, string_name)

    def py__file__(self) -> Optional[Path]:
        # Namespace packages have no __init__.py, hence no file.
        return None

    def py__package__(self):
        """Return the fullname
        """
        return self.string_names

    def py__path__(self):
        return self._paths

    def py__name__(self):
        return '.'.join(self.string_names)

    def is_namespace(self):
        return True

    def is_stub(self):
        return False

    def is_package(self):
        return True

    def as_context(self):
        return NamespaceContext(self)

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.py__name__())
diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/parser_utils.py b/bundle/jedi-vim/pythonx/jedi/jedi/parser_utils.py
new file mode 100644
index 000000000..3c7fa151e
--- /dev/null
+++ b/bundle/jedi-vim/pythonx/jedi/jedi/parser_utils.py
@@ -0,0 +1,341 @@
import re
import textwrap
from ast import literal_eval
from inspect import cleandoc
from weakref import WeakKeyDictionary

from parso.python import tree
from parso.cache import parser_cache
from parso import split_lines

# Node types whose inference is worthwhile for static analysis.
_EXECUTE_NODES = {'funcdef', 'classdef', 'import_from', 'import_name', 'test',
                  'or_test', 'and_test', 'not_test', 'comparison', 'expr',
                  'xor_expr', 'and_expr', 'shift_expr', 'arith_expr',
                  'atom_expr', 'term', 'factor', 'power', 'atom'}

_FLOW_KEYWORDS = (
    'try', 'except', 'finally', 'else', 'if', 'elif', 'with', 'for', 'while'
)


def get_executable_nodes(node, last_added=False):
    """
    For static analysis.

    Recursively collects the nodes under ``node`` that are worth inferring
    (names being read, expression statements, decorator callees, ...).
    """
    result = []
    typ = node.type
    if typ == 'name':
        next_leaf = node.get_next_leaf()
        # Skip parameters and assignment targets (`x = ...`).
        if last_added is False and node.parent.type != 'param' and next_leaf != '=':
            result.append(node)
    elif typ == 'expr_stmt':
        # I think inferring the statement (and possibly returned arrays),
        # should be enough for static analysis.
        result.append(node)
        for child in node.children:
            result += get_executable_nodes(child, last_added=True)
    elif typ == 'decorator':
        # decorator
        if node.children[-2] == ')':
            node = node.children[-3]
            if node != '(':
                result += get_executable_nodes(node)
    else:
        try:
            children = node.children
        except AttributeError:
            pass
        else:
            if node.type in _EXECUTE_NODES and not last_added:
                result.append(node)

            for child in children:
                result += get_executable_nodes(child, last_added)

    return result


def get_sync_comp_fors(comp_for):
    # Walks a comprehension's trailing chain, yielding the given node and
    # every nested `sync_comp_for` (the async wrapper node is skipped).
    yield comp_for
    last = comp_for.children[-1]
    while True:
        if last.type == 'comp_for':
            yield last.children[1]  # Ignore the async.
        elif last.type == 'sync_comp_for':
            yield last
        elif not last.type == 'comp_if':
            break
        last = last.children[-1]


def for_stmt_defines_one_name(for_stmt):
    """
    Returns True if only one name is returned: ``for x in y``.
    Returns False if the for loop is more complicated: ``for x, z in y``.
+ + :returns: bool + """ + return for_stmt.children[1].type == 'name' + + +def get_flow_branch_keyword(flow_node, node): + start_pos = node.start_pos + if not (flow_node.start_pos < start_pos <= flow_node.end_pos): + raise ValueError('The node is not part of the flow.') + + keyword = None + for i, child in enumerate(flow_node.children): + if start_pos < child.start_pos: + return keyword + first_leaf = child.get_first_leaf() + if first_leaf in _FLOW_KEYWORDS: + keyword = first_leaf + return None + + +def clean_scope_docstring(scope_node): + """ Returns a cleaned version of the docstring token. """ + node = scope_node.get_doc_node() + if node is not None: + # TODO We have to check next leaves until there are no new + # leaves anymore that might be part of the docstring. A + # docstring can also look like this: ``'foo' 'bar' + # Returns a literal cleaned version of the ``Token``. + return cleandoc(safe_literal_eval(node.value)) + return '' + + +def find_statement_documentation(tree_node): + if tree_node.type == 'expr_stmt': + tree_node = tree_node.parent # simple_stmt + maybe_string = tree_node.get_next_sibling() + if maybe_string is not None: + if maybe_string.type == 'simple_stmt': + maybe_string = maybe_string.children[0] + if maybe_string.type == 'string': + return cleandoc(safe_literal_eval(maybe_string.value)) + return '' + + +def safe_literal_eval(value): + first_two = value[:2].lower() + if first_two[0] == 'f' or first_two in ('fr', 'rf'): + # literal_eval is not able to resovle f literals. We have to do that + # manually, but that's right now not implemented. + return '' + + return literal_eval(value) + + +def get_signature(funcdef, width=72, call_string=None, + omit_first_param=False, omit_return_annotation=False): + """ + Generate a string signature of a function. + + :param width: Fold lines if a line is longer than this value. + :type width: int + :arg func_name: Override function name when given. 
    :type func_name: str

    :rtype: str
    """
    # Lambdas have no name.
    if call_string is None:
        if funcdef.type == 'lambdef':
            call_string = ''
        else:
            call_string = funcdef.name.value
    params = funcdef.get_params()
    if omit_first_param:
        # Typically used to hide `self`/`cls` for bound methods.
        params = params[1:]
    p = '(' + ''.join(param.get_code() for param in params).strip() + ')'
    # TODO this is pretty bad, we should probably just normalize.
    p = re.sub(r'\s+', ' ', p)
    if funcdef.annotation and not omit_return_annotation:
        rtype = " ->" + funcdef.annotation.get_code()
    else:
        rtype = ""
    code = call_string + p + rtype

    return '\n'.join(textwrap.wrap(code, width))


def move(node, line_offset):
    """
    Move the `Node` start_pos.
    """
    # Recurses into children; only leaves carry a `line` attribute.
    try:
        children = node.children
    except AttributeError:
        node.line += line_offset
    else:
        for c in children:
            move(c, line_offset)


def get_following_comment_same_line(node):
    """
    returns (as string) any comment that appears on the same line,
    after the node, including the #
    """
    try:
        # The trailing comment lives in the prefix of the next leaf; the
        # child indexes below pick the leaf after the statement header.
        if node.type == 'for_stmt':
            whitespace = node.children[5].get_first_leaf().prefix
        elif node.type == 'with_stmt':
            whitespace = node.children[3].get_first_leaf().prefix
        elif node.type == 'funcdef':
            # actually on the next line
            whitespace = node.children[4].get_first_leaf().get_next_leaf().prefix
        else:
            whitespace = node.get_last_leaf().get_next_leaf().prefix
    except AttributeError:
        return None
    except ValueError:
        # TODO in some particular cases, the tree doesn't seem to be linked
        # correctly
        return None
    if "#" not in whitespace:
        return None
    comment = whitespace[whitespace.index("#"):]
    if "\r" in comment:
        comment = comment[:comment.index("\r")]
    if "\n" in comment:
        comment = comment[:comment.index("\n")]
    return comment


def is_scope(node):
    # True for nodes that open a new name scope (module, class, function,
    # lambda, comprehension).
    t = node.type
    if t == 'comp_for':
        # Starting with Python 3.8, async is outside of the statement.
        return node.children[1].type != 'sync_comp_for'

    return t in ('file_input', 'classdef', 'funcdef', 'lambdef', 'sync_comp_for')


def _get_parent_scope_cache(func):
    # Memoizes per parso cache node; entries die with the parsed module
    # because the outer mapping holds its keys weakly.
    cache = WeakKeyDictionary()

    def wrapper(parso_cache_node, node, include_flows=False):
        if parso_cache_node is None:
            return func(node, include_flows)

        try:
            for_module = cache[parso_cache_node]
        except KeyError:
            for_module = cache[parso_cache_node] = {}

        try:
            return for_module[node]
        except KeyError:
            result = for_module[node] = func(node, include_flows)
            return result
    return wrapper


def get_parent_scope(node, include_flows=False):
    """
    Returns the underlying scope.
    """
    scope = node.parent
    if scope is None:
        return None  # It's a module already.

    while True:
        if is_scope(scope):
            if scope.type in ('classdef', 'funcdef', 'lambdef'):
                # Names in the header (parameters, decorators, default
                # expressions before the `:`) belong to the enclosing scope,
                # except the parameter names themselves.
                index = scope.children.index(':')
                if scope.children[index].start_pos >= node.start_pos:
                    if node.parent.type == 'param' and node.parent.name == node:
                        pass
                    elif node.parent.type == 'tfpdef' and node.parent.children[0] == node:
                        pass
                    else:
                        scope = scope.parent
                        continue
            return scope
        elif include_flows and isinstance(scope, tree.Flow):
            # The cursor might be on `if foo`, so the parent scope will not be
            # the if, but the parent of the if.
            if not (scope.type == 'if_stmt'
                    and any(n.start_pos <= node.start_pos < n.end_pos
                            for n in scope.get_test_nodes())):
                return scope

        scope = scope.parent


get_cached_parent_scope = _get_parent_scope_cache(get_parent_scope)


def get_cached_code_lines(grammar, path):
    """
    Basically access the cached code lines in parso. This is not the nicest way
    to do this, but we avoid splitting all the lines again.
    """
    return get_parso_cache_node(grammar, path).lines


def get_parso_cache_node(grammar, path):
    """
    This is of course not public. But as long as I control parso, this
    shouldn't be a problem. ~ Dave

    The reason for this is mostly caching. This is obviously also a sign of a
    broken caching architecture.
    """
    return parser_cache[grammar._hashed][path]


def cut_value_at_position(leaf, position):
    """
    Cuts off the value of the leaf at position
    """
    lines = split_lines(leaf.value, keepends=True)[:position[0] - leaf.line + 1]
    column = position[1]
    if leaf.line == position[0]:
        # Make the column relative to the leaf's own start on that line.
        column -= leaf.column
    if not lines:
        return ''
    lines[-1] = lines[-1][:column]
    return ''.join(lines)


def expr_is_dotted(node):
    """
    Checks if a path looks like `name` or `name.foo.bar` and not `name()`.
    """
    if node.type == 'atom':
        # Parenthesized: `(a.b)` is fine, anything else is not.
        if len(node.children) == 3 and node.children[0] == '(':
            return expr_is_dotted(node.children[1])
        return False
    if node.type == 'atom_expr':
        children = node.children
        if children[0] == 'await':
            return False
        if not expr_is_dotted(children[0]):
            return False
        # Check trailers
        return all(c.children[0] == '.' for c in children[1:])
    return node.type == 'name'


def _function_is_x_method(*method_names):
    def wrapper(function_node):
        """
        This is a heuristic. It will not hold ALL the times, but it will be
        correct pretty much for anyone that doesn't try to beat it.
        staticmethod/classmethod are builtins and unless overwritten, this will
        be correct.
+ """ + for decorator in function_node.get_decorators(): + dotted_name = decorator.children[1] + if dotted_name.get_code() in method_names: + return True + return False + return wrapper + + +function_is_staticmethod = _function_is_x_method('staticmethod') +function_is_classmethod = _function_is_x_method('classmethod') +function_is_property = _function_is_x_method('property', 'cached_property') diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/plugins/__init__.py b/bundle/jedi-vim/pythonx/jedi/jedi/plugins/__init__.py new file mode 100644 index 000000000..8067676d0 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/plugins/__init__.py @@ -0,0 +1,47 @@ +from functools import wraps + + +class _PluginManager: + def __init__(self): + self._registered_plugins = [] + self._cached_base_callbacks = {} + self._built_functions = {} + + def register(self, *plugins): + """ + Makes it possible to register your plugin. + """ + self._registered_plugins.extend(plugins) + self._build_functions() + + def decorate(self, name=None): + def decorator(callback): + @wraps(callback) + def wrapper(*args, **kwargs): + return built_functions[public_name](*args, **kwargs) + + public_name = name or callback.__name__ + + assert public_name not in self._built_functions + built_functions = self._built_functions + built_functions[public_name] = callback + self._cached_base_callbacks[public_name] = callback + + return wrapper + + return decorator + + def _build_functions(self): + for name, callback in self._cached_base_callbacks.items(): + for plugin in reversed(self._registered_plugins): + # Need to reverse so the first plugin is run first. 
+ try: + func = getattr(plugin, name) + except AttributeError: + pass + else: + callback = func(callback) + self._built_functions[name] = callback + + +plugin_manager = _PluginManager() diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/plugins/django.py b/bundle/jedi-vim/pythonx/jedi/jedi/plugins/django.py new file mode 100644 index 000000000..cd443bbda --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/plugins/django.py @@ -0,0 +1,296 @@ +""" +Module is used to infer Django model fields. +""" +from inspect import Parameter + +from jedi import debug +from jedi.inference.cache import inference_state_function_cache +from jedi.inference.base_value import ValueSet, iterator_to_value_set, ValueWrapper +from jedi.inference.filters import DictFilter, AttributeOverwrite +from jedi.inference.names import NameWrapper, BaseTreeParamName +from jedi.inference.compiled.value import EmptyCompiledName +from jedi.inference.value.instance import TreeInstance +from jedi.inference.value.klass import ClassMixin +from jedi.inference.gradual.base import GenericClass +from jedi.inference.gradual.generics import TupleGenericManager +from jedi.inference.signature import AbstractSignature + + +mapping = { + 'IntegerField': (None, 'int'), + 'BigIntegerField': (None, 'int'), + 'PositiveIntegerField': (None, 'int'), + 'SmallIntegerField': (None, 'int'), + 'CharField': (None, 'str'), + 'TextField': (None, 'str'), + 'EmailField': (None, 'str'), + 'GenericIPAddressField': (None, 'str'), + 'URLField': (None, 'str'), + 'FloatField': (None, 'float'), + 'BinaryField': (None, 'bytes'), + 'BooleanField': (None, 'bool'), + 'DecimalField': ('decimal', 'Decimal'), + 'TimeField': ('datetime', 'time'), + 'DurationField': ('datetime', 'timedelta'), + 'DateField': ('datetime', 'date'), + 'DateTimeField': ('datetime', 'datetime'), + 'UUIDField': ('uuid', 'UUID'), +} + +_FILTER_LIKE_METHODS = ('create', 'filter', 'exclude', 'update', 'get', + 'get_or_create', 'update_or_create') + + 
@inference_state_function_cache()
def _get_deferred_attributes(inference_state):
    # Class-level access to a model field infers to Django's
    # DeferredAttribute descriptor instance.
    return inference_state.import_module(
        ('django', 'db', 'models', 'query_utils')
    ).py__getattribute__('DeferredAttribute').execute_annotation()


def _infer_scalar_field(inference_state, field_name, field_tree_instance, is_instance):
    """
    Map a scalar field (CharField, IntegerField, ...) to an instance of the
    Python type it stores; returns None for non-scalar fields.
    """
    try:
        module_name, attribute_name = mapping[field_tree_instance.py__name__()]
    except KeyError:
        return None

    if not is_instance:
        return _get_deferred_attributes(inference_state)

    if module_name is None:
        module = inference_state.builtins_module
    else:
        module = inference_state.import_module((module_name,))

    for attribute in module.py__getattribute__(attribute_name):
        return attribute.execute_with_values()


@iterator_to_value_set
def _get_foreign_key_values(cls, field_tree_instance):
    # Resolve the target model of a ForeignKey/OneToOneField/ManyToManyField,
    # given either as a class or as a string name looked up in cls's module.
    if isinstance(field_tree_instance, TreeInstance):
        # TODO private access..
        argument_iterator = field_tree_instance._arguments.unpack()
        key, lazy_values = next(argument_iterator, (None, None))
        if key is None and lazy_values is not None:
            for value in lazy_values.infer():
                if value.py__name__() == 'str':
                    foreign_key_class_name = value.get_safe_value()
                    module = cls.get_root_context()
                    for v in module.py__getattribute__(foreign_key_class_name):
                        if v.is_class():
                            yield v
                elif value.is_class():
                    yield value


def _infer_field(cls, field_name, is_instance):
    """
    Infer the value of accessing model field ``field_name`` on ``cls``
    (class-level or instance-level depending on ``is_instance``).
    """
    inference_state = cls.inference_state
    result = field_name.infer()
    for field_tree_instance in result:
        scalar_field = _infer_scalar_field(
            inference_state, field_name, field_tree_instance, is_instance)
        if scalar_field is not None:
            return scalar_field

        name = field_tree_instance.py__name__()
        is_many_to_many = name == 'ManyToManyField'
        if name in ('ForeignKey', 'OneToOneField') or is_many_to_many:
            if not is_instance:
                return _get_deferred_attributes(inference_state)

            values = _get_foreign_key_values(cls, field_tree_instance)
            if is_many_to_many:
                return ValueSet(filter(None, [
                    _create_manager_for(v, 'RelatedManager') for v in values
                ]))
            else:
                return values.execute_with_values()

    debug.dbg('django plugin: fail to infer `%s` from class `%s`',
              field_name.string_name, cls.py__name__())
    return result


class DjangoModelName(NameWrapper):
    # A model attribute name whose inference is redirected through
    # _infer_field instead of the plain tree inference.
    def __init__(self, cls, name, is_instance):
        super().__init__(name)
        self._cls = cls
        self._is_instance = is_instance

    def infer(self):
        return _infer_field(self._cls, self._wrapped_name, self._is_instance)


def _create_manager_for(cls, manager_cls='BaseManager'):
    # Returns `manager_cls[cls]` (a generic Django manager parameterized with
    # the model class), or None when the manager class cannot be resolved.
    managers = cls.inference_state.import_module(
        ('django', 'db', 'models', 'manager')
    ).py__getattribute__(manager_cls)
    for m in managers:
        if m.is_class_mixin():
            generics_manager = TupleGenericManager((ValueSet([cls]),))
            for c in GenericClass(m, generics_manager).execute_annotation():
                return c
    return None


def _new_dict_filter(cls, is_instance):
    # Build one flat filter over all names of the model class, wrapping each
    # in DjangoModelName so field access infers to field value types.
    filters = list(cls.get_filters(
        is_instance=is_instance,
        include_metaclasses=False,
        include_type_when_class=False)
    )
    dct = {
        name.string_name: DjangoModelName(cls, name, is_instance)
        for filter_ in reversed(filters)
        for name in filter_.values()
    }
    if is_instance:
        # Replace the objects with a name that amounts to nothing when accessed
        # in an instance. This is not perfect and still completes "objects" in
        # that case, but it at least not inferes stuff like `.objects.filter`.
        # It would be nicer to do that in a better way, so that it also doesn't
        # show up in completions, but it's probably just not worth doing that
        # for the extra amount of work.
        dct['objects'] = EmptyCompiledName(cls.inference_state, 'objects')

    return DictFilter(dct)


def is_django_model_base(value):
    # True when `value` is Django's ModelBase metaclass itself.
    return value.py__name__() == 'ModelBase' \
        and value.get_root_context().py__name__() == 'django.db.models.base'


def get_metaclass_filters(func):
    # Plugin hook: Django models get field-aware filters; everything else
    # falls through to the wrapped implementation.
    def wrapper(cls, metaclasses, is_instance):
        for metaclass in metaclasses:
            if is_django_model_base(metaclass):
                return [_new_dict_filter(cls, is_instance)]

        return func(cls, metaclasses, is_instance)
    return wrapper


def tree_name_to_values(func):
    # Plugin hook: wraps queryset methods, BaseManager and Field so that
    # generics propagate through Django's manager/queryset machinery.
    def wrapper(inference_state, context, tree_name):
        result = func(inference_state, context, tree_name)
        if tree_name.value in _FILTER_LIKE_METHODS:
            # Here we try to overwrite stuff like User.objects.filter. We need
            # this to make sure that keyword param completion works on these
            # kind of methods.
            for v in result:
                if v.get_qualified_names() == ('_BaseQuerySet', tree_name.value) \
                        and v.parent_context.is_module() \
                        and v.parent_context.py__name__() == 'django.db.models.query':
                    qs = context.get_value()
                    generics = qs.get_generics()
                    if len(generics) >= 1:
                        return ValueSet(QuerySetMethodWrapper(v, model)
                                        for model in generics[0])

        elif tree_name.value == 'BaseManager' and context.is_module() \
                and context.py__name__() == 'django.db.models.manager':
            return ValueSet(ManagerWrapper(r) for r in result)

        elif tree_name.value == 'Field' and context.is_module() \
                and context.py__name__() == 'django.db.models.fields':
            return ValueSet(FieldWrapper(r) for r in result)
        return result
    return wrapper


def _find_fields(cls):
    # A name is a "field" iff its class-level access infers to Django's
    # DeferredAttribute descriptor.
    for name in _new_dict_filter(cls, is_instance=False).values():
        for value in name.infer():
            if value.name.get_qualified_names(include_module_names=True) \
                    == ('django', 'db', 'models', 'query_utils', 'DeferredAttribute'):
                yield name


def _get_signatures(cls):
    # Model constructors accept every field as a keyword argument.
    return [DjangoModelSignature(cls, field_names=list(_find_fields(cls)))]


def 
get_metaclass_signatures(func): + def wrapper(cls, metaclasses): + for metaclass in metaclasses: + if is_django_model_base(metaclass): + return _get_signatures(cls) + return func(cls, metaclass) + return wrapper + + +class ManagerWrapper(ValueWrapper): + def py__getitem__(self, index_value_set, contextualized_node): + return ValueSet( + GenericManagerWrapper(generic) + for generic in self._wrapped_value.py__getitem__( + index_value_set, contextualized_node) + ) + + +class GenericManagerWrapper(AttributeOverwrite, ClassMixin): + def py__get__on_class(self, calling_instance, instance, class_value): + return calling_instance.class_value.with_generics( + (ValueSet({class_value}),) + ).py__call__(calling_instance._arguments) + + def with_generics(self, generics_tuple): + return self._wrapped_value.with_generics(generics_tuple) + + +class FieldWrapper(ValueWrapper): + def py__getitem__(self, index_value_set, contextualized_node): + return ValueSet( + GenericFieldWrapper(generic) + for generic in self._wrapped_value.py__getitem__( + index_value_set, contextualized_node) + ) + + +class GenericFieldWrapper(AttributeOverwrite, ClassMixin): + def py__get__on_class(self, calling_instance, instance, class_value): + # This is mostly an optimization to avoid Jedi aborting inference, + # because of too many function executions of Field.__get__. 
+ return ValueSet({calling_instance}) + + +class DjangoModelSignature(AbstractSignature): + def __init__(self, value, field_names): + super().__init__(value) + self._field_names = field_names + + def get_param_names(self, resolve_stars=False): + return [DjangoParamName(name) for name in self._field_names] + + +class DjangoParamName(BaseTreeParamName): + def __init__(self, field_name): + super().__init__(field_name.parent_context, field_name.tree_name) + self._field_name = field_name + + def get_kind(self): + return Parameter.KEYWORD_ONLY + + def infer(self): + return self._field_name.infer() + + +class QuerySetMethodWrapper(ValueWrapper): + def __init__(self, method, model_cls): + super().__init__(method) + self._model_cls = model_cls + + def py__get__(self, instance, class_value): + return ValueSet({QuerySetBoundMethodWrapper(v, self._model_cls) + for v in self._wrapped_value.py__get__(instance, class_value)}) + + +class QuerySetBoundMethodWrapper(ValueWrapper): + def __init__(self, method, model_cls): + super().__init__(method) + self._model_cls = model_cls + + def get_signatures(self): + return _get_signatures(self._model_cls) diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/plugins/flask.py b/bundle/jedi-vim/pythonx/jedi/jedi/plugins/flask.py new file mode 100644 index 000000000..8d67b8395 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/plugins/flask.py @@ -0,0 +1,21 @@ +def import_module(callback): + """ + Handle "magic" Flask extension imports: + ``flask.ext.foo`` is really ``flask_foo`` or ``flaskext.foo``. + """ + def wrapper(inference_state, import_names, module_context, *args, **kwargs): + if len(import_names) == 3 and import_names[:2] == ('flask', 'ext'): + # New style. 
+ ipath = ('flask_' + import_names[2]), + value_set = callback(inference_state, ipath, None, *args, **kwargs) + if value_set: + return value_set + value_set = callback(inference_state, ('flaskext',), None, *args, **kwargs) + return callback( + inference_state, + ('flaskext', import_names[2]), + next(iter(value_set)), + *args, **kwargs + ) + return callback(inference_state, import_names, module_context, *args, **kwargs) + return wrapper diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/plugins/pytest.py b/bundle/jedi-vim/pythonx/jedi/jedi/plugins/pytest.py new file mode 100644 index 000000000..c78bdb4f7 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/plugins/pytest.py @@ -0,0 +1,203 @@ +from pathlib import Path + +from parso.tree import search_ancestor +from jedi.inference.cache import inference_state_method_cache +from jedi.inference.imports import load_module_from_path +from jedi.inference.filters import ParserTreeFilter +from jedi.inference.base_value import NO_VALUES, ValueSet +from jedi.inference.helpers import infer_call_of_leaf + +_PYTEST_FIXTURE_MODULES = [ + ('_pytest', 'monkeypatch'), + ('_pytest', 'capture'), + ('_pytest', 'logging'), + ('_pytest', 'tmpdir'), + ('_pytest', 'pytester'), +] + + +def execute(callback): + def wrapper(value, arguments): + # This might not be necessary anymore in pytest 4/5, definitely needed + # for pytest 3. 
+ if value.py__name__() == 'fixture' \ + and value.parent_context.py__name__() == '_pytest.fixtures': + return NO_VALUES + + return callback(value, arguments) + return wrapper + + +def infer_anonymous_param(func): + def get_returns(value): + if value.tree_node.annotation is not None: + result = value.execute_with_values() + if any(v.name.get_qualified_names(include_module_names=True) + == ('typing', 'Generator') + for v in result): + return ValueSet.from_sets( + v.py__getattribute__('__next__').execute_annotation() + for v in result + ) + return result + + # In pytest we need to differentiate between generators and normal + # returns. + # Parameters still need to be anonymous, .as_context() ensures that. + function_context = value.as_context() + if function_context.is_generator(): + return function_context.merge_yield_values() + else: + return function_context.get_return_values() + + def wrapper(param_name): + # parameters with an annotation do not need special handling + if param_name.annotation_node: + return func(param_name) + is_pytest_param, param_name_is_function_name = \ + _is_a_pytest_param_and_inherited(param_name) + if is_pytest_param: + module = param_name.get_root_context() + fixtures = _goto_pytest_fixture( + module, + param_name.string_name, + # This skips the current module, because we are basically + # inheriting a fixture from somewhere else. 
+ skip_own_module=param_name_is_function_name, + ) + if fixtures: + return ValueSet.from_sets( + get_returns(value) + for fixture in fixtures + for value in fixture.infer() + ) + return func(param_name) + return wrapper + + +def goto_anonymous_param(func): + def wrapper(param_name): + is_pytest_param, param_name_is_function_name = \ + _is_a_pytest_param_and_inherited(param_name) + if is_pytest_param: + names = _goto_pytest_fixture( + param_name.get_root_context(), + param_name.string_name, + skip_own_module=param_name_is_function_name, + ) + if names: + return names + return func(param_name) + return wrapper + + +def complete_param_names(func): + def wrapper(context, func_name, decorator_nodes): + module_context = context.get_root_context() + if _is_pytest_func(func_name, decorator_nodes): + names = [] + for module_context in _iter_pytest_modules(module_context): + names += FixtureFilter(module_context).values() + if names: + return names + return func(context, func_name, decorator_nodes) + return wrapper + + +def _goto_pytest_fixture(module_context, name, skip_own_module): + for module_context in _iter_pytest_modules(module_context, skip_own_module=skip_own_module): + names = FixtureFilter(module_context).get(name) + if names: + return names + + +def _is_a_pytest_param_and_inherited(param_name): + """ + Pytest params are either in a `test_*` function or have a pytest fixture + with the decorator @pytest.fixture. + + This is a heuristic and will work in most cases. 
+ """ + funcdef = search_ancestor(param_name.tree_name, 'funcdef') + if funcdef is None: # A lambda + return False, False + decorators = funcdef.get_decorators() + return _is_pytest_func(funcdef.name.value, decorators), \ + funcdef.name.value == param_name.string_name + + +def _is_pytest_func(func_name, decorator_nodes): + return func_name.startswith('test') \ + or any('fixture' in n.get_code() for n in decorator_nodes) + + +@inference_state_method_cache() +def _iter_pytest_modules(module_context, skip_own_module=False): + if not skip_own_module: + yield module_context + + file_io = module_context.get_value().file_io + if file_io is not None: + folder = file_io.get_parent_folder() + sys_path = module_context.inference_state.get_sys_path() + + # prevent an infinite loop when reaching the root of the current drive + last_folder = None + + while any(folder.path.startswith(p) for p in sys_path): + file_io = folder.get_file_io('conftest.py') + if Path(file_io.path) != module_context.py__file__(): + try: + m = load_module_from_path(module_context.inference_state, file_io) + yield m.as_context() + except FileNotFoundError: + pass + folder = folder.get_parent_folder() + + # prevent an infinite for loop if the same parent folder is return twice + if last_folder is not None and folder.path == last_folder.path: + break + last_folder = folder # keep track of the last found parent name + + for names in _PYTEST_FIXTURE_MODULES: + for module_value in module_context.inference_state.import_module(names): + yield module_value.as_context() + + +class FixtureFilter(ParserTreeFilter): + def _filter(self, names): + for name in super()._filter(names): + funcdef = name.parent + # Class fixtures are not supported + if funcdef.type == 'funcdef': + decorated = funcdef.parent + if decorated.type == 'decorated' and self._is_fixture(decorated): + yield name + + def _is_fixture(self, decorated): + decorators = decorated.children[0] + if decorators.type == 'decorators': + decorators = 
decorators.children + else: + decorators = [decorators] + for decorator in decorators: + dotted_name = decorator.children[1] + # A heuristic, this makes it faster. + if 'fixture' in dotted_name.get_code(): + if dotted_name.type == 'atom_expr': + # Since Python3.9 a decorator does not have dotted names + # anymore. + last_trailer = dotted_name.children[-1] + last_leaf = last_trailer.get_last_leaf() + if last_leaf == ')': + values = infer_call_of_leaf( + self.parent_context, last_leaf, cut_own_trailer=True) + else: + values = self.parent_context.infer_node(dotted_name) + else: + values = self.parent_context.infer_node(dotted_name) + for value in values: + if value.name.get_qualified_names(include_module_names=True) \ + == ('_pytest', 'fixtures', 'fixture'): + return True + return False diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/plugins/registry.py b/bundle/jedi-vim/pythonx/jedi/jedi/plugins/registry.py new file mode 100644 index 000000000..c1a0b7496 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/plugins/registry.py @@ -0,0 +1,12 @@ +""" +This is not a plugin, this is just the place were plugins are registered. +""" + +from jedi.plugins import stdlib +from jedi.plugins import flask +from jedi.plugins import pytest +from jedi.plugins import django +from jedi.plugins import plugin_manager + + +plugin_manager.register(stdlib, flask, pytest, django) diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/plugins/stdlib.py b/bundle/jedi-vim/pythonx/jedi/jedi/plugins/stdlib.py new file mode 100644 index 000000000..17f1df3bc --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/plugins/stdlib.py @@ -0,0 +1,868 @@ +""" +Implementations of standard library functions, because it's not possible to +understand them with Jedi. + +To add a new implementation, create a function and add it to the +``_implemented`` dict at the bottom of this module. + +Note that this module exists only to implement very specific functionality in +the standard library. 
The usual way to understand the standard library is the +compiled module that returns the types for C-builtins. +""" +import parso +import os +from inspect import Parameter + +from jedi import debug +from jedi.inference.utils import safe_property +from jedi.inference.helpers import get_str_or_none +from jedi.inference.arguments import iterate_argument_clinic, ParamIssue, \ + repack_with_argument_clinic, AbstractArguments, TreeArgumentsWrapper +from jedi.inference import analysis +from jedi.inference import compiled +from jedi.inference.value.instance import \ + AnonymousMethodExecutionContext, MethodExecutionContext +from jedi.inference.base_value import ContextualizedNode, \ + NO_VALUES, ValueSet, ValueWrapper, LazyValueWrapper +from jedi.inference.value import ClassValue, ModuleValue +from jedi.inference.value.klass import ClassMixin +from jedi.inference.value.function import FunctionMixin +from jedi.inference.value import iterable +from jedi.inference.lazy_value import LazyTreeValue, LazyKnownValue, \ + LazyKnownValues +from jedi.inference.names import ValueName, BaseTreeParamName +from jedi.inference.filters import AttributeOverwrite, publish_method, \ + ParserTreeFilter, DictFilter +from jedi.inference.signature import AbstractSignature, SignatureWrapper + + +# Copied from Python 3.6's stdlib. 
+_NAMEDTUPLE_CLASS_TEMPLATE = """\ +_property = property +_tuple = tuple +from operator import itemgetter as _itemgetter +from collections import OrderedDict + +class {typename}(tuple): + __slots__ = () + + _fields = {field_names!r} + + def __new__(_cls, {arg_list}): + 'Create new instance of {typename}({arg_list})' + return _tuple.__new__(_cls, ({arg_list})) + + @classmethod + def _make(cls, iterable, new=tuple.__new__, len=len): + 'Make a new {typename} object from a sequence or iterable' + result = new(cls, iterable) + if len(result) != {num_fields:d}: + raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result)) + return result + + def _replace(_self, **kwds): + 'Return a new {typename} object replacing specified fields with new values' + result = _self._make(map(kwds.pop, {field_names!r}, _self)) + if kwds: + raise ValueError('Got unexpected field names: %r' % list(kwds)) + return result + + def __repr__(self): + 'Return a nicely formatted representation string' + return self.__class__.__name__ + '({repr_fmt})' % self + + def _asdict(self): + 'Return a new OrderedDict which maps field names to their values.' + return OrderedDict(zip(self._fields, self)) + + def __getnewargs__(self): + 'Return self as a plain tuple. Used by copy and pickle.' + return tuple(self) + + # These methods were added by Jedi. + # __new__ doesn't really work with Jedi. So adding this to nametuples seems + # like the easiest way. + def __init__(self, {arg_list}): + 'A helper function for namedtuple.' 
+ self.__iterable = ({arg_list}) + + def __iter__(self): + for i in self.__iterable: + yield i + + def __getitem__(self, y): + return self.__iterable[y] + +{field_defs} +""" + +_NAMEDTUPLE_FIELD_TEMPLATE = '''\ + {name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}') +''' + + +def execute(callback): + def wrapper(value, arguments): + def call(): + return callback(value, arguments=arguments) + + try: + obj_name = value.name.string_name + except AttributeError: + pass + else: + p = value.parent_context + if p is not None and p.is_builtins_module(): + module_name = 'builtins' + elif p is not None and p.is_module(): + module_name = p.py__name__() + else: + return call() + + if value.is_bound_method() or value.is_instance(): + # value can be an instance for example if it is a partial + # object. + return call() + + # for now we just support builtin functions. + try: + func = _implemented[module_name][obj_name] + except KeyError: + pass + else: + return func(value, arguments=arguments, callback=call) + return call() + + return wrapper + + +def _follow_param(inference_state, arguments, index): + try: + key, lazy_value = list(arguments.unpack())[index] + except IndexError: + return NO_VALUES + else: + return lazy_value.infer() + + +def argument_clinic(clinic_string, want_value=False, want_context=False, + want_arguments=False, want_inference_state=False, + want_callback=False): + """ + Works like Argument Clinic (PEP 436), to validate function params. 
+ """ + + def f(func): + def wrapper(value, arguments, callback): + try: + args = tuple(iterate_argument_clinic( + value.inference_state, arguments, clinic_string)) + except ParamIssue: + return NO_VALUES + + debug.dbg('builtin start %s' % value, color='MAGENTA') + kwargs = {} + if want_context: + kwargs['context'] = arguments.context + if want_value: + kwargs['value'] = value + if want_inference_state: + kwargs['inference_state'] = value.inference_state + if want_arguments: + kwargs['arguments'] = arguments + if want_callback: + kwargs['callback'] = callback + result = func(*args, **kwargs) + debug.dbg('builtin end: %s', result, color='MAGENTA') + return result + + return wrapper + return f + + +@argument_clinic('iterator[, default], /', want_inference_state=True) +def builtins_next(iterators, defaults, inference_state): + # TODO theoretically we have to check here if something is an iterator. + # That is probably done by checking if it's not a class. + return defaults | iterators.py__getattribute__('__next__').execute_with_values() + + +@argument_clinic('iterator[, default], /') +def builtins_iter(iterators_or_callables, defaults): + # TODO implement this if it's a callable. + return iterators_or_callables.py__getattribute__('__iter__').execute_with_values() + + +@argument_clinic('object, name[, default], /') +def builtins_getattr(objects, names, defaults=None): + # follow the first param + for value in objects: + for name in names: + string = get_str_or_none(name) + if string is None: + debug.warning('getattr called without str') + continue + else: + return value.py__getattribute__(string) + return NO_VALUES + + +@argument_clinic('object[, bases, dict], /') +def builtins_type(objects, bases, dicts): + if bases or dicts: + # It's a type creation... maybe someday... 
+ return NO_VALUES + else: + return objects.py__class__() + + +class SuperInstance(LazyValueWrapper): + """To be used like the object ``super`` returns.""" + def __init__(self, inference_state, instance): + self.inference_state = inference_state + self._instance = instance # Corresponds to super().__self__ + + def _get_bases(self): + return self._instance.py__class__().py__bases__() + + def _get_wrapped_value(self): + objs = self._get_bases()[0].infer().execute_with_values() + if not objs: + # This is just a fallback and will only be used, if it's not + # possible to find a class + return self._instance + return next(iter(objs)) + + def get_filters(self, origin_scope=None): + for b in self._get_bases(): + for value in b.infer().execute_with_values(): + for f in value.get_filters(): + yield f + + +@argument_clinic('[type[, value]], /', want_context=True) +def builtins_super(types, objects, context): + instance = None + if isinstance(context, AnonymousMethodExecutionContext): + instance = context.instance + elif isinstance(context, MethodExecutionContext): + instance = context.instance + if instance is None: + return NO_VALUES + return ValueSet({SuperInstance(instance.inference_state, instance)}) + + +class ReversedObject(AttributeOverwrite): + def __init__(self, reversed_obj, iter_list): + super().__init__(reversed_obj) + self._iter_list = iter_list + + def py__iter__(self, contextualized_node=None): + return self._iter_list + + @publish_method('__next__') + def _next(self, arguments): + return ValueSet.from_sets( + lazy_value.infer() for lazy_value in self._iter_list + ) + + +@argument_clinic('sequence, /', want_value=True, want_arguments=True) +def builtins_reversed(sequences, value, arguments): + # While we could do without this variable (just by using sequences), we + # want static analysis to work well. Therefore we need to generated the + # values again. 
+ key, lazy_value = next(arguments.unpack()) + cn = None + if isinstance(lazy_value, LazyTreeValue): + cn = ContextualizedNode(lazy_value.context, lazy_value.data) + ordered = list(sequences.iterate(cn)) + + # Repack iterator values and then run it the normal way. This is + # necessary, because `reversed` is a function and autocompletion + # would fail in certain cases like `reversed(x).__iter__` if we + # just returned the result directly. + seq, = value.inference_state.typing_module.py__getattribute__('Iterator').execute_with_values() + return ValueSet([ReversedObject(seq, list(reversed(ordered)))]) + + +@argument_clinic('value, type, /', want_arguments=True, want_inference_state=True) +def builtins_isinstance(objects, types, arguments, inference_state): + bool_results = set() + for o in objects: + cls = o.py__class__() + try: + cls.py__bases__ + except AttributeError: + # This is temporary. Everything should have a class attribute in + # Python?! Maybe we'll leave it here, because some numpy objects or + # whatever might not. + bool_results = set([True, False]) + break + + mro = list(cls.py__mro__()) + + for cls_or_tup in types: + if cls_or_tup.is_class(): + bool_results.add(cls_or_tup in mro) + elif cls_or_tup.name.string_name == 'tuple' \ + and cls_or_tup.get_root_context().is_builtins_module(): + # Check for tuples. + classes = ValueSet.from_sets( + lazy_value.infer() + for lazy_value in cls_or_tup.iterate() + ) + bool_results.add(any(cls in mro for cls in classes)) + else: + _, lazy_value = list(arguments.unpack())[1] + if isinstance(lazy_value, LazyTreeValue): + node = lazy_value.data + message = 'TypeError: isinstance() arg 2 must be a ' \ + 'class, type, or tuple of classes and types, ' \ + 'not %s.' 
% cls_or_tup + analysis.add(lazy_value.context, 'type-error-isinstance', node, message) + + return ValueSet( + compiled.builtin_from_name(inference_state, str(b)) + for b in bool_results + ) + + +class StaticMethodObject(ValueWrapper): + def py__get__(self, instance, class_value): + return ValueSet([self._wrapped_value]) + + +@argument_clinic('sequence, /') +def builtins_staticmethod(functions): + return ValueSet(StaticMethodObject(f) for f in functions) + + +class ClassMethodObject(ValueWrapper): + def __init__(self, class_method_obj, function): + super().__init__(class_method_obj) + self._function = function + + def py__get__(self, instance, class_value): + return ValueSet([ + ClassMethodGet(__get__, class_value, self._function) + for __get__ in self._wrapped_value.py__getattribute__('__get__') + ]) + + +class ClassMethodGet(ValueWrapper): + def __init__(self, get_method, klass, function): + super().__init__(get_method) + self._class = klass + self._function = function + + def get_signatures(self): + return [sig.bind(self._function) for sig in self._function.get_signatures()] + + def py__call__(self, arguments): + return self._function.execute(ClassMethodArguments(self._class, arguments)) + + +class ClassMethodArguments(TreeArgumentsWrapper): + def __init__(self, klass, arguments): + super().__init__(arguments) + self._class = klass + + def unpack(self, func=None): + yield None, LazyKnownValue(self._class) + for values in self._wrapped_arguments.unpack(func): + yield values + + +@argument_clinic('sequence, /', want_value=True, want_arguments=True) +def builtins_classmethod(functions, value, arguments): + return ValueSet( + ClassMethodObject(class_method_object, function) + for class_method_object in value.py__call__(arguments=arguments) + for function in functions + ) + + +class PropertyObject(AttributeOverwrite, ValueWrapper): + api_type = 'property' + + def __init__(self, property_obj, function): + super().__init__(property_obj) + self._function = function + + 
def py__get__(self, instance, class_value): + if instance is None: + return ValueSet([self]) + return self._function.execute_with_values(instance) + + @publish_method('deleter') + @publish_method('getter') + @publish_method('setter') + def _return_self(self, arguments): + return ValueSet({self}) + + +@argument_clinic('func, /', want_callback=True) +def builtins_property(functions, callback): + return ValueSet( + PropertyObject(property_value, function) + for property_value in callback() + for function in functions + ) + + +def collections_namedtuple(value, arguments, callback): + """ + Implementation of the namedtuple function. + + This has to be done by processing the namedtuple class template and + inferring the result. + + """ + inference_state = value.inference_state + + # Process arguments + name = 'jedi_unknown_namedtuple' + for c in _follow_param(inference_state, arguments, 0): + x = get_str_or_none(c) + if x is not None: + name = x + break + + # TODO here we only use one of the types, we should use all. 
+ param_values = _follow_param(inference_state, arguments, 1) + if not param_values: + return NO_VALUES + _fields = list(param_values)[0] + string = get_str_or_none(_fields) + if string is not None: + fields = string.replace(',', ' ').split() + elif isinstance(_fields, iterable.Sequence): + fields = [ + get_str_or_none(v) + for lazy_value in _fields.py__iter__() + for v in lazy_value.infer() + ] + fields = [f for f in fields if f is not None] + else: + return NO_VALUES + + # Build source code + code = _NAMEDTUPLE_CLASS_TEMPLATE.format( + typename=name, + field_names=tuple(fields), + num_fields=len(fields), + arg_list=repr(tuple(fields)).replace("'", "")[1:-1], + repr_fmt='', + field_defs='\n'.join(_NAMEDTUPLE_FIELD_TEMPLATE.format(index=index, name=name) + for index, name in enumerate(fields)) + ) + + # Parse source code + module = inference_state.grammar.parse(code) + generated_class = next(module.iter_classdefs()) + parent_context = ModuleValue( + inference_state, module, + code_lines=parso.split_lines(code, keepends=True), + ).as_context() + + return ValueSet([ClassValue(inference_state, parent_context, generated_class)]) + + +class PartialObject(ValueWrapper): + def __init__(self, actual_value, arguments, instance=None): + super().__init__(actual_value) + self._arguments = arguments + self._instance = instance + + def _get_functions(self, unpacked_arguments): + key, lazy_value = next(unpacked_arguments, (None, None)) + if key is not None or lazy_value is None: + debug.warning("Partial should have a proper function %s", self._arguments) + return None + return lazy_value.infer() + + def get_signatures(self): + unpacked_arguments = self._arguments.unpack() + funcs = self._get_functions(unpacked_arguments) + if funcs is None: + return [] + + arg_count = 0 + if self._instance is not None: + arg_count = 1 + keys = set() + for key, _ in unpacked_arguments: + if key is None: + arg_count += 1 + else: + keys.add(key) + return [PartialSignature(s, arg_count, keys) for s 
in funcs.get_signatures()] + + def py__call__(self, arguments): + funcs = self._get_functions(self._arguments.unpack()) + if funcs is None: + return NO_VALUES + + return funcs.execute( + MergedPartialArguments(self._arguments, arguments, self._instance) + ) + + def py__doc__(self): + """ + In CPython partial does not replace the docstring. However we are still + imitating it here, because we want this docstring to be worth something + for the user. + """ + callables = self._get_functions(self._arguments.unpack()) + if callables is None: + return '' + for callable_ in callables: + return callable_.py__doc__() + return '' + + def py__get__(self, instance, class_value): + return ValueSet([self]) + + +class PartialMethodObject(PartialObject): + def py__get__(self, instance, class_value): + if instance is None: + return ValueSet([self]) + return ValueSet([PartialObject(self._wrapped_value, self._arguments, instance)]) + + +class PartialSignature(SignatureWrapper): + def __init__(self, wrapped_signature, skipped_arg_count, skipped_arg_set): + super().__init__(wrapped_signature) + self._skipped_arg_count = skipped_arg_count + self._skipped_arg_set = skipped_arg_set + + def get_param_names(self, resolve_stars=False): + names = self._wrapped_signature.get_param_names()[self._skipped_arg_count:] + return [n for n in names if n.string_name not in self._skipped_arg_set] + + +class MergedPartialArguments(AbstractArguments): + def __init__(self, partial_arguments, call_arguments, instance=None): + self._partial_arguments = partial_arguments + self._call_arguments = call_arguments + self._instance = instance + + def unpack(self, funcdef=None): + unpacked = self._partial_arguments.unpack(funcdef) + # Ignore this one, it's the function. It was checked before that it's + # there. 
+ next(unpacked, None) + if self._instance is not None: + yield None, LazyKnownValue(self._instance) + for key_lazy_value in unpacked: + yield key_lazy_value + for key_lazy_value in self._call_arguments.unpack(funcdef): + yield key_lazy_value + + +def functools_partial(value, arguments, callback): + return ValueSet( + PartialObject(instance, arguments) + for instance in value.py__call__(arguments) + ) + + +def functools_partialmethod(value, arguments, callback): + return ValueSet( + PartialMethodObject(instance, arguments) + for instance in value.py__call__(arguments) + ) + + +@argument_clinic('first, /') +def _return_first_param(firsts): + return firsts + + +@argument_clinic('seq') +def _random_choice(sequences): + return ValueSet.from_sets( + lazy_value.infer() + for sequence in sequences + for lazy_value in sequence.py__iter__() + ) + + +def _dataclass(value, arguments, callback): + for c in _follow_param(value.inference_state, arguments, 0): + if c.is_class(): + return ValueSet([DataclassWrapper(c)]) + else: + return ValueSet([value]) + return NO_VALUES + + +class DataclassWrapper(ValueWrapper, ClassMixin): + def get_signatures(self): + param_names = [] + for cls in reversed(list(self.py__mro__())): + if isinstance(cls, DataclassWrapper): + filter_ = cls.as_context().get_global_filter() + # .values ordering is not guaranteed, at least not in + # Python < 3.6, when dicts where not ordered, which is an + # implementation detail anyway. 
+ for name in sorted(filter_.values(), key=lambda name: name.start_pos): + d = name.tree_name.get_definition() + annassign = d.children[1] + if d.type == 'expr_stmt' and annassign.type == 'annassign': + if len(annassign.children) < 4: + default = None + else: + default = annassign.children[3] + param_names.append(DataclassParamName( + parent_context=cls.parent_context, + tree_name=name.tree_name, + annotation_node=annassign.children[1], + default_node=default, + )) + return [DataclassSignature(cls, param_names)] + + +class DataclassSignature(AbstractSignature): + def __init__(self, value, param_names): + super().__init__(value) + self._param_names = param_names + + def get_param_names(self, resolve_stars=False): + return self._param_names + + +class DataclassParamName(BaseTreeParamName): + def __init__(self, parent_context, tree_name, annotation_node, default_node): + super().__init__(parent_context, tree_name) + self.annotation_node = annotation_node + self.default_node = default_node + + def get_kind(self): + return Parameter.POSITIONAL_OR_KEYWORD + + def infer(self): + if self.annotation_node is None: + return NO_VALUES + else: + return self.parent_context.infer_node(self.annotation_node) + + +class ItemGetterCallable(ValueWrapper): + def __init__(self, instance, args_value_set): + super().__init__(instance) + self._args_value_set = args_value_set + + @repack_with_argument_clinic('item, /') + def py__call__(self, item_value_set): + value_set = NO_VALUES + for args_value in self._args_value_set: + lazy_values = list(args_value.py__iter__()) + if len(lazy_values) == 1: + # TODO we need to add the contextualized value. 
+ value_set |= item_value_set.get_item(lazy_values[0].infer(), None) + else: + value_set |= ValueSet([iterable.FakeList( + self._wrapped_value.inference_state, + [ + LazyKnownValues(item_value_set.get_item(lazy_value.infer(), None)) + for lazy_value in lazy_values + ], + )]) + return value_set + + +@argument_clinic('func, /') +def _functools_wraps(funcs): + return ValueSet(WrapsCallable(func) for func in funcs) + + +class WrapsCallable(ValueWrapper): + # XXX this is not the correct wrapped value, it should be a weird + # partials object, but it doesn't matter, because it's always used as a + # decorator anyway. + @repack_with_argument_clinic('func, /') + def py__call__(self, funcs): + return ValueSet({Wrapped(func, self._wrapped_value) for func in funcs}) + + +class Wrapped(ValueWrapper, FunctionMixin): + def __init__(self, func, original_function): + super().__init__(func) + self._original_function = original_function + + @property + def name(self): + return self._original_function.name + + def get_signature_functions(self): + return [self] + + +@argument_clinic('*args, /', want_value=True, want_arguments=True) +def _operator_itemgetter(args_value_set, value, arguments): + return ValueSet([ + ItemGetterCallable(instance, args_value_set) + for instance in value.py__call__(arguments) + ]) + + +def _create_string_input_function(func): + @argument_clinic('string, /', want_value=True, want_arguments=True) + def wrapper(strings, value, arguments): + def iterate(): + for value in strings: + s = get_str_or_none(value) + if s is not None: + s = func(s) + yield compiled.create_simple_object(value.inference_state, s) + values = ValueSet(iterate()) + if values: + return values + return value.py__call__(arguments) + return wrapper + + +@argument_clinic('*args, /', want_callback=True) +def _os_path_join(args_set, callback): + if len(args_set) == 1: + string = '' + sequence, = args_set + is_first = True + for lazy_value in sequence.py__iter__(): + string_values = 
lazy_value.infer() + if len(string_values) != 1: + break + s = get_str_or_none(next(iter(string_values))) + if s is None: + break + if not is_first: + string += os.path.sep + string += s + is_first = False + else: + return ValueSet([compiled.create_simple_object(sequence.inference_state, string)]) + return callback() + + +_implemented = { + 'builtins': { + 'getattr': builtins_getattr, + 'type': builtins_type, + 'super': builtins_super, + 'reversed': builtins_reversed, + 'isinstance': builtins_isinstance, + 'next': builtins_next, + 'iter': builtins_iter, + 'staticmethod': builtins_staticmethod, + 'classmethod': builtins_classmethod, + 'property': builtins_property, + }, + 'copy': { + 'copy': _return_first_param, + 'deepcopy': _return_first_param, + }, + 'json': { + 'load': lambda value, arguments, callback: NO_VALUES, + 'loads': lambda value, arguments, callback: NO_VALUES, + }, + 'collections': { + 'namedtuple': collections_namedtuple, + }, + 'functools': { + 'partial': functools_partial, + 'partialmethod': functools_partialmethod, + 'wraps': _functools_wraps, + }, + '_weakref': { + 'proxy': _return_first_param, + }, + 'random': { + 'choice': _random_choice, + }, + 'operator': { + 'itemgetter': _operator_itemgetter, + }, + 'abc': { + # Not sure if this is necessary, but it's used a lot in typeshed and + # it's for now easier to just pass the function. + 'abstractmethod': _return_first_param, + }, + 'typing': { + # The _alias function just leads to some annoying type inference. + # Therefore, just make it return nothing, which leads to the stubs + # being used instead. This only matters for 3.7+. + '_alias': lambda value, arguments, callback: NO_VALUES, + # runtime_checkable doesn't really change anything and is just + # adding logs for infering stuff, so we can safely ignore it. + 'runtime_checkable': lambda value, arguments, callback: NO_VALUES, + }, + 'dataclasses': { + # For now this works at least better than Jedi trying to understand it. 
+ 'dataclass': _dataclass + }, + 'os.path': { + 'dirname': _create_string_input_function(os.path.dirname), + 'abspath': _create_string_input_function(os.path.abspath), + 'relpath': _create_string_input_function(os.path.relpath), + 'join': _os_path_join, + } +} + + +def get_metaclass_filters(func): + def wrapper(cls, metaclasses, is_instance): + for metaclass in metaclasses: + if metaclass.py__name__() == 'EnumMeta' \ + and metaclass.get_root_context().py__name__() == 'enum': + filter_ = ParserTreeFilter(parent_context=cls.as_context()) + return [DictFilter({ + name.string_name: EnumInstance(cls, name).name + for name in filter_.values() + })] + return func(cls, metaclasses, is_instance) + return wrapper + + +class EnumInstance(LazyValueWrapper): + def __init__(self, cls, name): + self.inference_state = cls.inference_state + self._cls = cls # Corresponds to super().__self__ + self._name = name + self.tree_node = self._name.tree_name + + @safe_property + def name(self): + return ValueName(self, self._name.tree_name) + + def _get_wrapped_value(self): + n = self._name.string_name + if n.startswith('__') and n.endswith('__') or self._name.api_type == 'function': + inferred = self._name.infer() + if inferred: + return next(iter(inferred)) + o, = self.inference_state.builtins_module.py__getattribute__('object') + return o + + value, = self._cls.execute_with_values() + return value + + def get_filters(self, origin_scope=None): + yield DictFilter(dict( + name=compiled.create_simple_object(self.inference_state, self._name.string_name).name, + value=self._name, + )) + for f in self._get_wrapped_value().get_filters(): + yield f + + +def tree_name_to_values(func): + def wrapper(inference_state, context, tree_name): + if tree_name.value == 'sep' and context.is_module() and context.py__name__() == 'os.path': + return ValueSet({ + compiled.create_simple_object(inference_state, os.path.sep), + }) + return func(inference_state, context, tree_name) + return wrapper diff --git 
a/bundle/jedi-vim/pythonx/jedi/jedi/settings.py b/bundle/jedi-vim/pythonx/jedi/jedi/settings.py new file mode 100644 index 000000000..c31a474a5 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/settings.py @@ -0,0 +1,154 @@ +""" +This module contains variables with global |jedi| settings. To change the +behavior of |jedi|, change the variables defined in :mod:`jedi.settings`. + +Plugins should expose an interface so that the user can adjust the +configuration. + + +Example usage:: + + from jedi import settings + settings.case_insensitive_completion = True + + +Completion output +~~~~~~~~~~~~~~~~~ + +.. autodata:: case_insensitive_completion +.. autodata:: add_bracket_after_function + + +Filesystem cache +~~~~~~~~~~~~~~~~ + +.. autodata:: cache_directory + + +Parser +~~~~~~ + +.. autodata:: fast_parser + + +Dynamic stuff +~~~~~~~~~~~~~ + +.. autodata:: dynamic_array_additions +.. autodata:: dynamic_params +.. autodata:: dynamic_params_for_other_modules +.. autodata:: auto_import_modules + + +Caching +~~~~~~~ + +.. autodata:: call_signatures_validity + + +""" +import os +import platform + +# ---------------- +# Completion Output Settings +# ---------------- + +case_insensitive_completion = True +""" +Completions are by default case insensitive. +""" + +add_bracket_after_function = False +""" +Adds an opening bracket after a function for completions. +""" + +# ---------------- +# Filesystem Cache +# ---------------- + +if platform.system().lower() == 'windows': + _cache_directory = os.path.join( + os.getenv('LOCALAPPDATA') or os.path.expanduser('~'), + 'Jedi', + 'Jedi', + ) +elif platform.system().lower() == 'darwin': + _cache_directory = os.path.join('~', 'Library', 'Caches', 'Jedi') +else: + _cache_directory = os.path.join(os.getenv('XDG_CACHE_HOME') or '~/.cache', + 'jedi') +cache_directory = os.path.expanduser(_cache_directory) +""" +The path where the cache is stored. 
+ +On Linux, this defaults to ``~/.cache/jedi/``, on OS X to +``~/Library/Caches/Jedi/`` and on Windows to ``%LOCALAPPDATA%\\Jedi\\Jedi\\``. +On Linux, if the environment variable ``$XDG_CACHE_HOME`` is set, +``$XDG_CACHE_HOME/jedi`` is used instead of the default one. +""" + +# ---------------- +# Parser +# ---------------- + +fast_parser = True +""" +Uses Parso's diff parser. If it is enabled, this might cause issues, please +read the warning on :class:`.Script`. This feature makes it possible to only +parse the parts again that have changed, while reusing the rest of the syntax +tree. +""" + +_cropped_file_size = int(10e6) # 1 Megabyte +""" +Jedi gets extremely slow if the file size exceed a few thousand lines. +To avoid getting stuck completely Jedi crops the file at some point. + +One megabyte of typical Python code equals about 20'000 lines of code. +""" + +# ---------------- +# Dynamic Stuff +# ---------------- + +dynamic_array_additions = True +""" +check for `append`, etc. on arrays: [], {}, () as well as list/set calls. +""" + +dynamic_params = True +""" +A dynamic param completion, finds the callees of the function, which define +the params of a function. +""" + +dynamic_params_for_other_modules = True +""" +Do the same for other modules. +""" + +dynamic_flow_information = True +""" +Check for `isinstance` and other information to infer a type. +""" + +auto_import_modules = [ + 'gi', # This third-party repository (GTK stuff) doesn't really work with jedi +] +""" +Modules that will not be analyzed but imported, if they contain Python code. +This improves autocompletion for libraries that use ``setattr`` or +``globals()`` modifications a lot. +""" + +# ---------------- +# Caching Validity +# ---------------- + +call_signatures_validity = 3.0 +""" +Finding function calls might be slow (0.1-0.5s). This is not acceptible for +normal writing. Therefore cache it for a short time. 
+""" diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/third_party/README_typeshed.md b/bundle/jedi-vim/pythonx/jedi/jedi/third_party/README_typeshed.md new file mode 100644 index 000000000..60ae79df4 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/third_party/README_typeshed.md @@ -0,0 +1,30 @@ +# Typeshed in Jedi + +Typeshed is used in Jedi to provide completions for all the stdlib modules. + +The relevant files in jedi are in `jedi/inference/gradual`. `gradual` stands +for "gradual typing". + +## Updating Typeshed + +Currently Jedi has a custom implementation hosted in +https://github.com/davidhalter/typeshed.git for two reasons: + +- Jedi doesn't understand Tuple.__init__ properly. +- Typeshed has a bug: https://github.com/python/typeshed/issues/2999 + +Therefore we need a bit of a complicated process to upgrade typeshed: + + cd jedi/third_party/typeshed + git remote add upstream https://github.com/python/typeshed + git fetch upstream + git checkout jedi + git rebase upstream/master + git push -f + + git push + cd ../../.. + git commit jedi/third_party/typeshed -m "Upgrade typeshed" + +If merge conflicts appear, just make sure that only one commit from Jedi +appears. diff --git a/bundle/jedi-vim/pythonx/jedi/jedi/utils.py b/bundle/jedi-vim/pythonx/jedi/jedi/utils.py new file mode 100644 index 000000000..b7e1c1a84 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/jedi/utils.py @@ -0,0 +1,134 @@ +""" +Utilities for end-users. +""" + +import __main__ # type: ignore[import] +from collections import namedtuple +import logging +import traceback +import re +import os +import sys + +from jedi import Interpreter + + +READLINE_DEBUG = False + + +def setup_readline(namespace_module=__main__, fuzzy=False): + """ + This function sets up :mod:`readline` to use Jedi in a Python interactive + shell. 
+ + If you want to use a custom ``PYTHONSTARTUP`` file (typically + ``$HOME/.pythonrc.py``), you can add this piece of code:: + + try: + from jedi.utils import setup_readline + except ImportError: + # Fallback to the stdlib readline completer if it is installed. + # Taken from http://docs.python.org/2/library/rlcompleter.html + print("Jedi is not installed, falling back to readline") + try: + import readline + import rlcompleter + readline.parse_and_bind("tab: complete") + except ImportError: + print("Readline is not installed either. No tab completion is enabled.") + else: + setup_readline() + + This will fallback to the readline completer if Jedi is not installed. + The readline completer will only complete names in the global namespace, + so for example:: + + ran + + will complete to ``range``. + + With Jedi the following code:: + + range(10).cou + + will complete to ``range(10).count``, this does not work with the default + cPython :mod:`readline` completer. + + You will also need to add ``export PYTHONSTARTUP=$HOME/.pythonrc.py`` to + your shell profile (usually ``.bash_profile`` or ``.profile`` if you use + bash). + """ + if READLINE_DEBUG: + logging.basicConfig( + filename='/tmp/jedi.log', + filemode='a', + level=logging.DEBUG + ) + + class JediRL: + def complete(self, text, state): + """ + This complete stuff is pretty weird, a generator would make + a lot more sense, but probably due to backwards compatibility + this is still the way how it works. + + The only important part is stuff in the ``state == 0`` flow, + everything else has been copied from the ``rlcompleter`` std. + library module. + """ + if state == 0: + sys.path.insert(0, os.getcwd()) + # Calling python doesn't have a path, so add to sys.path. 
+ try: + logging.debug("Start REPL completion: " + repr(text)) + interpreter = Interpreter(text, [namespace_module.__dict__]) + + completions = interpreter.complete(fuzzy=fuzzy) + logging.debug("REPL completions: %s", completions) + + self.matches = [ + text[:len(text) - c._like_name_length] + c.name_with_symbols + for c in completions + ] + except: + logging.error("REPL Completion error:\n" + traceback.format_exc()) + raise + finally: + sys.path.pop(0) + try: + return self.matches[state] + except IndexError: + return None + + try: + # Need to import this one as well to make sure it's executed before + # this code. This didn't use to be an issue until 3.3. Starting with + # 3.4 this is different, it always overwrites the completer if it's not + # already imported here. + import rlcompleter # noqa: F401 + import readline + except ImportError: + print("Jedi: Module readline not available.") + else: + readline.set_completer(JediRL().complete) + readline.parse_and_bind("tab: complete") + # jedi itself does the case matching + readline.parse_and_bind("set completion-ignore-case on") + # because it's easier to hit the tab just once + readline.parse_and_bind("set show-all-if-unmodified") + readline.parse_and_bind("set show-all-if-ambiguous on") + # don't repeat all the things written in the readline all the time + readline.parse_and_bind("set completion-prefix-display-length 2") + # No delimiters, Jedi handles that. + readline.set_completer_delims('') + + +def version_info(): + """ + Returns a namedtuple of Jedi's version, similar to Python's + ``sys.version_info``. 
+ """ + Version = namedtuple('Version', 'major, minor, micro') + from jedi import __version__ + tupl = re.findall(r'[a-z]+|\d+', __version__) + return Version(*[x if i == 3 else int(x) for i, x in enumerate(tupl)]) diff --git a/bundle/jedi-vim/pythonx/jedi/pytest.ini b/bundle/jedi-vim/pythonx/jedi/pytest.ini new file mode 100644 index 000000000..d2da07a53 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/pytest.ini @@ -0,0 +1,13 @@ +[pytest] +addopts = --doctest-modules + +# Ignore broken files in blackbox test directories +norecursedirs = .* jedi/third_party scripts docs + test/completion test/refactor test/static_analysis test/examples + +# Activate `clean_jedi_cache` fixture for all tests. This should be +# fine as long as we are using `clean_jedi_cache` as a session scoped +# fixture. +usefixtures = clean_jedi_cache + +testpaths = jedi test diff --git a/bundle/jedi-vim/pythonx/jedi/scripts/diff_parser_profile.py b/bundle/jedi-vim/pythonx/jedi/scripts/diff_parser_profile.py new file mode 100644 index 000000000..93a12029a --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/scripts/diff_parser_profile.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python +""" +Profile a piece of Python code with ``cProfile`` that uses the diff parser. + +Usage: + profile.py [-d] [-s ] + profile.py -h | --help + +Options: + -h --help Show this screen. + -d --debug Enable Jedi internal debugging. + -s Sort the profile results, e.g. cumtime, name [default: time]. 
+""" + +import cProfile + +from docopt import docopt +from jedi.parser.python import load_grammar +from jedi.parser.diff import DiffParser +from jedi.parser.python import ParserWithRecovery +from jedi.common import splitlines +import jedi + + +def run(parser, lines): + diff_parser = DiffParser(parser) + diff_parser.update(lines) + # Make sure used_names is loaded + parser.module.used_names + + +def main(args): + if args['--debug']: + jedi.set_debug_function(notices=True) + + with open(args['']) as f: + code = f.read() + grammar = load_grammar() + parser = ParserWithRecovery(grammar, code) + # Make sure used_names is loaded + parser.module.used_names + + code = code + '\na\n' # Add something so the diff parser needs to run. + lines = splitlines(code, keepends=True) + cProfile.runctx('run(parser, lines)', globals(), locals(), sort=args['-s']) + + +if __name__ == '__main__': + args = docopt(__doc__) + main(args) diff --git a/bundle/jedi-vim/pythonx/jedi/scripts/memory_check.py b/bundle/jedi-vim/pythonx/jedi/scripts/memory_check.py new file mode 100644 index 000000000..7bbcad2b5 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/scripts/memory_check.py @@ -0,0 +1,58 @@ +#! /usr/bin/env python +""" +This is a convenience script to test the speed and memory usage of Jedi with +large libraries. + +Each library is preloaded by jedi, recording the time and memory consumed by +each operation. + +You can provide additional libraries via command line arguments. + +Note: This requires the psutil library, available on PyPI. 
+""" +import time +import sys +import os +import psutil +sys.path.insert(0, os.path.abspath(os.path.dirname(__file__) + '/..')) +import jedi + + +def used_memory(): + """Return the total MB of System Memory in use.""" + return psutil.virtual_memory().used / 2 ** 20 + + +def profile_preload(mod): + """Preload a module into Jedi, recording time and memory used.""" + base = used_memory() + t0 = time.time() + jedi.preload_module(mod) + elapsed = time.time() - t0 + used = used_memory() - base + return elapsed, used + + +def main(mods): + """Preload the modules, and print the time and memory used.""" + t0 = time.time() + baseline = used_memory() + print('Time (s) | Mem (MB) | Package') + print('------------------------------') + for mod in mods: + elapsed, used = profile_preload(mod) + if used > 0: + print('%8.2f | %8d | %s' % (elapsed, used, mod)) + print('------------------------------') + elapsed = time.time() - t0 + used = used_memory() - baseline + print('%8.2f | %8d | %s' % (elapsed, used, 'Total')) + + +if __name__ == '__main__': + if sys.argv[1:]: + mods = sys.argv[1:] + else: + mods = ['re', 'numpy', 'scipy', 'scipy.sparse', 'scipy.stats', + 'wx', 'decimal', 'PyQt4.QtGui', 'PySide.QtGui', 'Tkinter'] + main(mods) diff --git a/bundle/jedi-vim/pythonx/jedi/scripts/profile_output.py b/bundle/jedi-vim/pythonx/jedi/scripts/profile_output.py new file mode 100644 index 000000000..53e0046c4 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/scripts/profile_output.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python3.6 +""" +Profile a piece of Python code with ``profile``. Tries a completion on a +certain piece of code. + +Usage: + profile.py [] [-n ] [-d] [-o] [-s ] [-i] [--precision] + profile.py -h | --help + +Options: + -h --help Show this screen. + -n Number of passes before profiling [default: 1]. + -d --debug Enable Jedi internal debugging. + -o --omit Omit profiler, just do a normal run. + -i --infer Infer types instead of completions. + -s Sort the profile results, e.g. 
cum, name [default: time]. + --precision Makes profile time formatting more precise (nanoseconds) +""" + +import time +import profile +import pstats + +from docopt import docopt +import jedi + + +# Monkeypatch the time formatting function of profiling to make it easier to +# understand small time differences. +def f8(x): + ret = "%7.3f " % x + if ret == ' 0.000 ': + return "%6dµs" % (x * 1e6) + if ret.startswith(' 0.00'): + return "%8.4f" % x + return ret + + +def run(code, index, infer=False): + start = time.time() + script = jedi.Script(code) + if infer: + result = script.infer() + else: + result = script.complete() + print('Used %ss for the %sth run.' % (time.time() - start, index + 1)) + return result + + +def main(args): + code = args[''] + infer = args['--infer'] + n = int(args['-n']) + + for i in range(n): + run(code, i, infer=infer) + + if args['--precision']: + pstats.f8 = f8 + + jedi.set_debug_function(notices=args['--debug']) + if args['--omit']: + run(code, n, infer=infer) + else: + profile.runctx('run(code, n, infer=infer)', globals(), locals(), sort=args['-s']) + + +if __name__ == '__main__': + args = docopt(__doc__) + if args[''] is None: + args[''] = 'import numpy; numpy.array([0]).' + main(args) diff --git a/bundle/jedi-vim/pythonx/jedi/scripts/profiled_pytest.sh b/bundle/jedi-vim/pythonx/jedi/scripts/profiled_pytest.sh new file mode 100644 index 000000000..b61df1c29 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/scripts/profiled_pytest.sh @@ -0,0 +1 @@ +python3 -m profile -s tottime $(which pytest) $@ diff --git a/bundle/jedi-vim/pythonx/jedi/scripts/wx_check.py b/bundle/jedi-vim/pythonx/jedi/scripts/wx_check.py new file mode 100644 index 000000000..6d49aa773 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/scripts/wx_check.py @@ -0,0 +1,62 @@ +#! /usr/bin/env python +""" +Depends: ``objgraph`` (third party Python library) + +``wx._core`` is a very nice module to test Jedi's speed and memory performance +on big Python modules. 
Its size is ~16kLOC (one file). It also seems to look +like a typical big Python modules. A mix between a lot of different Python +things. + +You can view a markup version of it here: +https://github.com/wxWidgets/wxPython/blob/master/src/gtk/_core.py +""" + +import resource +import time +import sys +try: + import urllib.request as urllib2 +except ImportError: + import urllib2 +import gc +from os.path import abspath, dirname + +import objgraph + +sys.path.insert(0, dirname(dirname(abspath(__file__)))) +import jedi + + +def process_memory(): + """ + In kB according to + https://stackoverflow.com/questions/938733/total-memory-used-by-python-process + """ + return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss + + +uri = 'https://raw.githubusercontent.com/wxWidgets/wxPython/master/src/gtk/_core.py' + +wx_core = urllib2.urlopen(uri).read() + + +def run(): + start = time.time() + print('Process Memory before: %skB' % process_memory()) + # After this the module should be cached. + # Need to invent a path so that it's really cached. + jedi.Script(wx_core, path='foobar.py').complete() + + gc.collect() # make sure that it's all fair and the gc did its job. + print('Process Memory after: %skB' % process_memory()) + + print(objgraph.most_common_types(limit=50)) + print('\nIt took %s seconds to parse the file.' % (time.time() - start)) + + +print('First pass') +run() +print('\nSecond pass') +run() +print('\nThird pass') +run() diff --git a/bundle/jedi-vim/pythonx/jedi/setup.cfg b/bundle/jedi-vim/pythonx/jedi/setup.cfg new file mode 100644 index 000000000..98a775aad --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/setup.cfg @@ -0,0 +1,54 @@ +[bdist_wheel] +universal=1 + +[flake8] +max-line-length = 100 +ignore = + # do not use bare 'except' + E722, + # don't know why this was ever even an option, 1+1 should be possible. + E226, + # Sometimes `type() is` makes sense and is better than isinstance. Code + # review is there to find the times when it doesn't make sense. 
+ E721, + # Line break before binary operator + W503, + # Single letter loop variables are often fine + E741, +per-file-ignores = + # Ignore apparently unused imports in files where we're (implicitly) + # re-exporting them. + jedi/__init__.py:F401 + jedi/inference/compiled/__init__.py:F401 + jedi/inference/value/__init__.py:F401 +exclude = jedi/third_party/* .tox/* + +[pycodestyle] +max-line-length = 100 + + +[mypy] +# Ensure generics are explicit about what they are (e.g: `List[str]` rather than +# just `List`) +disallow_any_generics = True + +disallow_subclassing_any = True + +# Avoid creating future gotchas emerging from bad typing +warn_redundant_casts = True +warn_unused_ignores = True +warn_return_any = True +warn_unused_configs = True + +warn_unreachable = True + +# Require values to be explicitly re-exported; this makes things easier for +# Flake8 too and avoids accidentally importing thing from the "wrong" place +# (which helps avoid circular imports) +implicit_reexport = False + +strict_equality = True + +[mypy-jedi,jedi.inference.compiled,jedi.inference.value,parso] +# Various __init__.py files which contain re-exports we want to implicitly make. +implicit_reexport = True diff --git a/bundle/jedi-vim/pythonx/jedi/setup.py b/bundle/jedi-vim/pythonx/jedi/setup.py new file mode 100644 index 000000000..e20d14c2a --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/setup.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python + +from setuptools import setup, find_packages +from setuptools.depends import get_module_constant + +import os + +__AUTHOR__ = 'David Halter' +__AUTHOR_EMAIL__ = 'davidhalter88@gmail.com' + +# Get the version from within jedi. It's defined in exactly one place now. 
+version = get_module_constant("jedi", "__version__") + +readme = open('README.rst').read() + '\n\n' + open('CHANGELOG.rst').read() + +assert os.path.isfile("jedi/third_party/typeshed/LICENSE"), \ + "Please download the typeshed submodule first (Hint: git submodule update --init)" +assert os.path.isfile("jedi/third_party/django-stubs/LICENSE.txt"), \ + "Please download the django-stubs submodule first (Hint: git submodule update --init)" + +setup(name='jedi', + version=version, + description='An autocompletion tool for Python that can be used for text editors.', + author=__AUTHOR__, + author_email=__AUTHOR_EMAIL__, + include_package_data=True, + maintainer=__AUTHOR__, + maintainer_email=__AUTHOR_EMAIL__, + url='https://github.com/davidhalter/jedi', + license='MIT', + keywords='python completion refactoring vim', + long_description=readme, + packages=find_packages(exclude=['test', 'test.*']), + python_requires='>=3.6', + install_requires=['parso>=0.8.0,<0.9.0'], + extras_require={ + 'testing': [ + 'pytest<7.0.0', + # docopt for sith doctests + 'docopt', + # coloroma for colored debug output + 'colorama', + 'Django<3.1', # For now pin this. 
+ ], + 'qa': [ + 'flake8==3.8.3', + 'mypy==0.782', + ], + }, + package_data={'jedi': ['*.pyi', 'third_party/typeshed/LICENSE', + 'third_party/typeshed/README']}, + platforms=['any'], + classifiers=[ + 'Development Status :: 4 - Beta', + 'Environment :: Plugins', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Topic :: Software Development :: Libraries :: Python Modules', + 'Topic :: Text Editors :: Integrated Development Environments (IDE)', + 'Topic :: Utilities', + ], + ) diff --git a/bundle/jedi-vim/pythonx/jedi/sith.py b/bundle/jedi-vim/pythonx/jedi/sith.py new file mode 100644 index 000000000..1a69d2ad5 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/sith.py @@ -0,0 +1,221 @@ +#!/usr/bin/env python + +""" +Sith attacks (and helps debugging) Jedi. + +Randomly search Python files and run Jedi on it. Exception and used +arguments are recorded to ``./record.json`` (specified by --record):: + + ./sith.py random /path/to/sourcecode + +Redo recorded exception:: + + ./sith.py redo + +Show recorded exception:: + + ./sith.py show + +Run a specific operation + + ./sith.py run + +Where operation is one of complete, goto, infer, get_references or get_signatures. + +Note: Line numbers start at 1; columns start at 0 (this is consistent with +many text editors, including Emacs). + +Usage: + sith.py [--pdb|--ipdb|--pudb] [-d] [-n=] [-f] [--record=] random [-s] [] + sith.py [--pdb|--ipdb|--pudb] [-d] [-f] [--record=] redo + sith.py [--pdb|--ipdb|--pudb] [-d] [-f] run + sith.py show [--record=] + sith.py -h | --help + +Options: + -h --help Show this screen. + --record= Exceptions are recorded in here [default: record.json]. 
+ -f, --fs-cache By default, file system cache is off for reproducibility. + -n, --maxtries= Maximum of random tries [default: 100] + -d, --debug Jedi print debugging when an error is raised. + -s Shows the path/line numbers of every completion before it starts. + --pdb Launch pdb when error is raised. + --ipdb Launch ipdb when error is raised. + --pudb Launch pudb when error is raised. +""" + +from docopt import docopt # type: ignore[import] + +import json +import os +import random +import sys +import traceback + +import jedi + + +class SourceFinder(object): + _files = None + + @staticmethod + def fetch(file_path): + if not os.path.isdir(file_path): + yield file_path + return + for root, dirnames, filenames in os.walk(file_path): + for name in filenames: + if name.endswith('.py'): + yield os.path.join(root, name) + + @classmethod + def files(cls, file_path): + if cls._files is None: + cls._files = list(cls.fetch(file_path)) + return cls._files + + +class TestCase(object): + def __init__(self, operation, path, line, column, traceback=None): + if operation not in self.operations: + raise ValueError("%s is not a valid operation" % operation) + + # Set other attributes + self.operation = operation + self.path = path + self.line = line + self.column = column + self.traceback = traceback + + @classmethod + def from_cache(cls, record): + with open(record) as f: + args = json.load(f) + return cls(*args) + + # Changing this? Also update the module docstring above. 
+ operations = ['complete', 'goto', 'infer', 'get_references', 'get_signatures'] + + @classmethod + def generate(cls, file_path): + operation = random.choice(cls.operations) + + path = random.choice(SourceFinder.files(file_path)) + with open(path) as f: + source = f.read() + lines = source.splitlines() + + if not lines: + lines = [''] + line = random.randint(1, len(lines)) + line_string = lines[line - 1] + line_len = len(line_string) + if line_string.endswith('\r\n'): + line_len -= 1 + if line_string.endswith('\n'): + line_len -= 1 + column = random.randint(0, line_len) + return cls(operation, path, line, column) + + def run(self, debugger, record=None, print_result=False): + try: + with open(self.path) as f: + self.script = jedi.Script(f.read(), path=self.path) + kwargs = {} + if self.operation == 'goto': + kwargs['follow_imports'] = random.choice([False, True]) + + self.objects = getattr(self.script, self.operation)(self.line, self.column, **kwargs) + if print_result: + print("{path}: Line {line} column {column}".format(**self.__dict__)) + self.show_location(self.line, self.column) + self.show_operation() + except Exception: + self.traceback = traceback.format_exc() + if record is not None: + call_args = (self.operation, self.path, self.line, self.column, self.traceback) + with open(record, 'w') as f: + json.dump(call_args, f) + self.show_errors() + if debugger: + einfo = sys.exc_info() + pdb = __import__(debugger) + if debugger == 'pudb': + pdb.post_mortem(einfo[2], einfo[0], einfo[1]) + else: + pdb.post_mortem(einfo[2]) + exit(1) + + def show_location(self, lineno, column, show=3): + # Three lines ought to be enough + lower = lineno - show if lineno - show > 0 else 0 + prefix = ' |' + for i, line in enumerate(self.script._code.split('\n')[lower:lineno]): + print(prefix, lower + i + 1, line) + print(prefix, ' ' * (column + len(str(lineno))), '^') + + def show_operation(self): + print("%s:\n" % self.operation.capitalize()) + if self.operation == 'complete': + 
self.show_completions() + else: + self.show_definitions() + + def show_completions(self): + for completion in self.objects: + print(completion.name) + + def show_definitions(self): + for completion in self.objects: + print(completion.full_name) + if completion.module_path is None: + continue + if os.path.abspath(completion.module_path) == os.path.abspath(self.path): + self.show_location(completion.line, completion.column) + + def show_errors(self): + sys.stderr.write(self.traceback) + print(("Error with running Script(...).{operation}() with\n" + "\tpath: {path}\n" + "\tline: {line}\n" + "\tcolumn: {column}").format(**self.__dict__)) + + +def main(arguments): + debugger = 'pdb' if arguments['--pdb'] else \ + 'ipdb' if arguments['--ipdb'] else \ + 'pudb' if arguments['--pudb'] else None + record = arguments['--record'] + + jedi.settings.use_filesystem_cache = arguments['--fs-cache'] + if arguments['--debug']: + jedi.set_debug_function() + + if arguments['redo'] or arguments['show']: + t = TestCase.from_cache(record) + if arguments['show']: + t.show_errors() + else: + t.run(debugger) + elif arguments['run']: + TestCase( + arguments[''], arguments[''], + int(arguments['']), int(arguments['']) + ).run(debugger, print_result=True) + else: + for _ in range(int(arguments['--maxtries'])): + t = TestCase.generate(arguments[''] or '.') + if arguments['-s']: + print('%s %s %s %s ' % (t.operation, t.path, t.line, t.column)) + sys.stdout.flush() + else: + print('.', end='') + t.run(debugger, record) + + sys.stdout.flush() + print() + + +if __name__ == '__main__': + arguments = docopt(__doc__) + main(arguments) diff --git a/bundle/jedi-vim/pythonx/jedi/test/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/completion/__init__.py new file mode 100644 index 000000000..dc4d72522 --- /dev/null +++ 
b/bundle/jedi-vim/pythonx/jedi/test/completion/__init__.py @@ -0,0 +1,8 @@ +""" needed for some modules to test against packages. """ + +some_variable = 1 + + +from . import imports +#? int() +imports.relative() diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/arrays.py b/bundle/jedi-vim/pythonx/jedi/test/completion/arrays.py new file mode 100644 index 000000000..21437bcec --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/arrays.py @@ -0,0 +1,529 @@ +# ----------------- +# basic array lookups +# ----------------- + + +#? int() +[1,""][0] +#? str() +[1,""][1] +#? int() str() +[1,""][2] +#? int() str() +[1,""][20] +#? int() str() +[1,""][str(hello)] + +a = list() +#? list() +[a][0] + +#? list() +[[a,a,a]][2][100] + +c = [[a,""]] +#? str() +c[0][1] + +b = [6,7] + +#? int() +b[8-7] +# Something unreasonable: +#? int() +b[''] + +# ----------------- +# Slices +# ----------------- +#? list() +b[8:] + +#? list() +b[int():] + +#? list() +b[:] +#? int() +b[:, :-1] + +#? 3 +b[:] + +#? int() +b[:, 1] +#? int() +b[:1, 1] +#? int() +b[1:1, 1] +#? int() +b[1:1:, ...] +#? int() +b[1:1:5, ...] + +class _StrangeSlice(): + def __getitem__(self, sliced): + return sliced + +# Should not result in an error, just because the slice itself is returned. +#? slice() +_StrangeSlice()[1:2] + +for x in b[:]: + #? int() + x + +for x in b[:, :-1]: + #? + x + +class Foo: + def __getitem__(self, item): + return item + +#? +Foo()[:, :-1][0] + +# ----------------- +# iterable multiplication +# ----------------- +a = ['']*2 +#? list() +a + +# ----------------- +# tuple assignments +# ----------------- +a1, b1 = (1, "") +#? int() +a1 +#? str() +b1 + +(a2, b2) = (1, "") +#? int() +a2 +#? str() +b2 + +# list assignment +[list1, list2] = (1, "") +#? int() +list1 +#? str() +list2 + +[list3, list4] = [1, ""] +#? int() +list3 +#? str() +list4 + +# ----------------- +# subtuple assignment +# ----------------- +(a3, (b3, c3)) = (1, ("", list)) +#? 
list +c3 + +a4, (b4, c4) = (1, ("", list)) +#? list +c4 +#? int() +a4 +#? str() +b4 + + +# ----------------- +# multiple assignments +# ----------------- +a = b = 1 +#? int() +a +#? int() +b + +(a, b) = (c, (e, f)) = ('2', (3, 4)) +#? str() +a +#? tuple() +b +#? str() +c +#? int() +e +#? int() +f + + +# ----------------- +# unnessecary braces +# ----------------- +a = (1) +#? int() +a +#? int() +(1) +#? int() +((1)) +#? int() +((1)+1) + +u, v = 1, "" +#? int() +u + +((u1, v1)) = 1, "" +#? int() +u1 +#? int() +(u1) + +(a), b = 1, '' +#? int() +a + +def a(): return '' +#? str() +(a)() +#? str() +(a)().title() +#? int() +(tuple).index() +#? int() +(tuple)().index() + +class C(): + def __init__(self): + self.a = (str()).upper() + +#? str() +C().a + +# ----------------- +# imbalanced sides +# ----------------- +(f, g) = (1,) +#? int() +f +#? [] +g. + +(f, g, h) = (1,'') +#? int() +f +#? str() +g +#? [] +h. + +(f1, g1) = 1 +#? [] +f1. +#? [] +g1. + +(f, g) = (1,'',1.0) +#? int() +f +#? str() +g + +# ----------------- +# setitem +# ----------------- + +class F: + setitem_x = [1,2] + setitem_x[0] = 3 + +#? ['setitem_x'] +F().setitem_x +#? list() +F().setitem_x + + +# ----------------- +# dicts +# ----------------- +dic2 = {'asdf': 3, 'b': 'str'} +#? int() +dic2['asdf'] +#? None int() str() +dic2.get('asdf') + +# string literal +#? int() +dic2[r'asdf'] +#? int() +dic2[r'asdf'] +#? int() +dic2[r'as' 'd' u'f'] +#? int() str() +dic2['just_something'] + +# unpacking +a, b = dic2 +#? str() +a +a, b = {1: 'x', 2.0: 1j} +#? int() float() +a +#? int() float() +b + + +def f(): + """ github #83 """ + r = {} + r['status'] = (200, 'ok') + return r + +#? dict() +f() + +# completion within dicts +#? 9 ['str'] +{str: str} + +# iteration problem (detected with sith) +d = dict({'a':''}) +def y(a): + return a +#? +y(**d) + +#? str() +d['a'] + +# problem with more complicated casts +dic = {str(key): ''} +#? str() +dic[''] + + +for x in {1: 3.0, '': 1j}: + #? int() str() + x + +#? 
['__iter__'] +dict().values().__iter__ + +d = dict(a=3, b='') +x, = d.values() +#? int() str() +x +#? int() +d['a'] +#? int() str() None +d.get('a') + +some_dct = dict({'a': 1, 'b': ''}, a=1.0) +#? float() +some_dct['a'] +#? str() +some_dct['b'] +#? int() float() str() +some_dct['c'] + +class Foo: + pass + +objects = {object(): 1, Foo: '', Foo(): 3.0} +#? int() float() str() +objects[Foo] +#? int() float() str() +objects[Foo()] +#? int() float() str() +objects[''] + +# ----------------- +# with variable as index +# ----------------- +a = (1, "") +index = 1 +#? str() +a[index] + +# these should just ouput the whole array +index = int +#? int() str() +a[index] +index = int() +#? int() str() +a[index] + +# dicts +index = 'asdf' + +dic2 = {'asdf': 3, 'b': 'str'} +#? int() +dic2[index] + +# ----------------- +# __getitem__ +# ----------------- + +class GetItem(): + def __getitem__(self, index): + return 1.0 + +#? float() +GetItem()[0] + +class GetItem(): + def __init__(self, el): + self.el = el + + def __getitem__(self, index): + return self.el + +#? str() +GetItem("")[1] + +class GetItemWithList(): + def __getitem__(self, index): + return [1, 1.0, 's'][index] + +#? float() +GetItemWithList()[1] + +for i in 0, 2: + #? int() str() + GetItemWithList()[i] + + +# With super +class SuperYeah(list): + def __getitem__(self, index): + return super()[index] + +#? +SuperYeah([1])[0] +#? +SuperYeah()[0] + +# ----------------- +# conversions +# ----------------- + +a = [1, ""] +#? int() str() +list(a)[1] + +#? int() str() +list(a)[0] +#? +set(a)[0] + +#? int() str() +list(set(a))[1] +#? int() str() +next(iter(set(a))) +#? int() str() +list(list(set(a)))[1] + +# does not yet work, because the recursion catching is not good enough (catches # to much) +#? int() str() +list(set(list(set(a))))[1] +#? int() str() +list(set(set(a)))[1] + +# frozenset +#? int() str() +list(frozenset(a))[1] +#? int() str() +list(set(frozenset(a)))[1] + +# iter +#? int() str() +list(iter(a))[1] +#? 
int() str() +list(iter(list(set(a))))[1] + +# tuple +#? int() str() +tuple(a)[1] +#? int() str() +tuple(list(set(a)))[1] + +#? int() +tuple((1,))[0] + +# implementation detail for lists, should not be visible +#? [] +list().__iterable + +# With a list comprehension. +for i in set(a for a in [1]): + #? int() + i + + +# ----------------- +# Merged Arrays +# ----------------- + +for x in [1] + ['']: + #? int() str() + x + +# ----------------- +# Potential Recursion Issues +# ----------------- +class X(): + def y(self): + self.a = [1] + + def x(self): + self.a = list(self.a) + #? int() + self.a[0] + +# ----------------- +# For loops with attribute assignment. +# ----------------- +def test_func(): + x = 'asdf' + for x.something in [6,7,8]: + pass + #? str() + x + + for x.something, b in [[6, 6.0]]: + pass + #? str() + x + + +#? int() +tuple({1})[0] + +# ----------------- +# PEP 3132 Extended Iterable Unpacking (star unpacking) +# ----------------- + +a, *b, c = [1, 'b', list, dict] +#? int() +a +#? +b +#? list +c + +# Not valid syntax +a, *b, *c = [1, 'd', list] +#? int() +a +#? +b +#? +c + +lc = [x for a, *x in [(1, '', 1.0)]] + +#? +lc[0][0] +#? +lc[0][1] diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/async_.py b/bundle/jedi-vim/pythonx/jedi/test/completion/async_.py new file mode 100644 index 000000000..86c5d71a5 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/async_.py @@ -0,0 +1,121 @@ +""" +Tests for all async use cases. + +Currently we're not supporting completion of them, but they should at least not +raise errors or return extremely strange results. +""" + +async def x(): + return 1 + +#? [] +x.cr_awai + +#? ['cr_await'] +x().cr_awai + +a = await x() +#? int() +a + +async def y(): + argh = await x() + #? int() + argh + #? ['__next__'] + x().__await__().__next + return 2 + +class A(): + @staticmethod + async def b(c=1, d=2): + return 1 + +#! 9 ['def b'] +await A.b() + +#! 
11 ['param d=2'] +await A.b(d=3) + +class Awaitable: + def __await__(self): + yield None + return '' + +async def awaitable_test(): + foo = await Awaitable() + #? str() + foo + +# python >= 3.6 + +async def asgen(): + yield 1 + await asyncio.sleep(0) + yield 2 + +async def wrapper(): + #? int() + [x async for x in asgen()][0] + + async for y in asgen(): + #? int() + y + +#? ['__anext__'] +asgen().__ane +#? [] +asgen().mro + + +# Normal completion (#1092) +normal_var1 = 42 + +async def foo(): + normal_var2 = False + #? ['normal_var1', 'normal_var2'] + normal_var + + +class C: + @classmethod + async def async_for_classmethod(cls) -> "C": + return + + async def async_for_method(cls) -> int: + return + + +async def f(): + c = await C.async_for_method() + #? int() + c + d = await C().async_for_method() + #? int() + d + + e = await C.async_for_classmethod() + #? C() + e + f = await C().async_for_classmethod() + #? C() + f + + +class AsyncCtxMgr: + def some_method(): + pass + + async def __aenter__(self): + return self + + async def __aexit__(self, *args): + pass + + +async def asyncctxmgr(): + async with AsyncCtxMgr() as acm: + #? AsyncCtxMgr() + acm + #? ['some_method'] + acm.som diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/basic.py b/bundle/jedi-vim/pythonx/jedi/test/completion/basic.py new file mode 100644 index 000000000..97bfeecda --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/basic.py @@ -0,0 +1,422 @@ +# ----------------- +# cursor position +# ----------------- +#? 0 int +int() +#? 3 int +int() +#? 4 str +int(str) + + +# ----------------- +# should not complete +# ----------------- +#? [] +. +#? [] +str.. +#? [] +a(0):. +#? 2 [] +0x0 +#? [] +1j +#? ['and', 'or', 'if', 'is', 'in', 'not'] +1j +x = None() +#? +x + +# ----------------- +# if/else/elif +# ----------------- + +if (random.choice([0, 1])): + 1 +elif(random.choice([0, 1])): + a = 3 +else: + a = '' +#? 
int() str() +a +def func(): + if random.choice([0, 1]): + 1 + elif(random.choice([0, 1])): + a = 3 + else: + a = '' + #? int() str() + return a +#? int() str() +func() + +# ----------------- +# keywords +# ----------------- + +#? list() +assert [] + +def focus_return(): + #? list() + return [] + + +# ----------------- +# for loops +# ----------------- + +for a in [1,2]: + #? int() + a + +for a1 in 1,"": + #? int() str() + a1 + +for a3, b3 in (1,""), (1,""), (1,""): + #? int() + a3 + #? str() + b3 +for (a3, b3) in (1,""), (1,""), (1,""): + #? int() + a3 + #? str() + b3 + +for a4, (b4, c4) in (1,("", list)), (1,("", list)): + #? int() + a4 + #? str() + b4 + #? list + c4 + +a = [] +for i in [1,'']: + #? int() str() + i + a += [i] + +#? int() str() +a[0] + +for i in list([1,'']): + #? int() str() + i + +#? int() str() +for x in [1,'']: x + +a = [] +b = [1.0,''] +for i in b: + a += [i] + +#? float() str() +a[0] + +for i in [1,2,3]: + #? int() + i +else: + i + + +# ----------------- +# range() +# ----------------- +for i in range(10): + #? int() + i + +# ----------------- +# ternary operator +# ----------------- + +a = 3 +b = '' if a else set() +#? str() set() +b + +def ret(a): + return ['' if a else set()] + +#? str() set() +ret(1)[0] +#? str() set() +ret()[0] + +# ----------------- +# global vars +# ----------------- + +def global_define(): + #? int() + global global_var_in_func + global_var_in_func = 3 + +#? int() +global_var_in_func + +#? ['global_var_in_func'] +global_var_in_f + + +def funct1(): + # From issue #610 + global global_dict_var + global_dict_var = dict() +def funct2(): + #! ['global_dict_var', 'global_dict_var'] + global global_dict_var + #? dict() + global_dict_var + + +global_var_predefined = None + +def init_global_var_predefined(): + global global_var_predefined + if global_var_predefined is None: + global_var_predefined = 3 + +#? int() None +global_var_predefined + + +def global_as_import(): + from import_tree import globals + #? 
['foo'] + globals.foo + #? int() + globals.foo + + +global r +r = r[r] +if r: + r += r + 2 + #? int() + r + +# ----------------- +# del +# ----------------- + +deleted_var = 3 +del deleted_var +#? +deleted_var +#? [] +deleted_var +#! [] +deleted_var + +# ----------------- +# within docstrs +# ----------------- + +def a(): + """ + #? [] + global_define + #? + str + """ + pass + +#? +# str literals in comment """ upper + +def completion_in_comment(): + #? ['Exception'] + # might fail because the comment is not a leaf: Exception + pass + +some_word +#? ['Exception'] +# Very simple comment completion: Exception +# Commment after it + +# ----------------- +# magic methods +# ----------------- + +class A(object): pass +class B(): pass + +#? ['__init__'] +A.__init__ +#? ['__init__'] +B.__init__ + +#? ['__init__'] +int().__init__ + +# ----------------- +# comments +# ----------------- + +class A(): + def __init__(self): + self.hello = {} # comment shouldn't be a string +#? dict() +A().hello + +# ----------------- +# unicode +# ----------------- +a = 'smörbröd' +#? str() +a +xyz = 'smörbröd.py' +if 1: + #? str() + xyz + +#? +¹. + +# ----------------- +# exceptions +# ----------------- +try: + import math +except ImportError as i_a: + #? ['i_a'] + i_a + #? ImportError() + i_a + + +class MyException(Exception): + def __init__(self, my_attr): + self.my_attr = my_attr + +try: + raise MyException(1) +except MyException as e: + #? ['my_attr'] + e.my_attr + #? 22 ['my_attr'] + for x in e.my_attr: + pass + +# ----------------- +# params +# ----------------- + +my_param = 1 +#? 9 str() +def foo1(my_param): + my_param = 3.0 +foo1("") + +my_type = float() +#? 20 float() +def foo2(my_param: my_type): + pass +foo2("") +#? 20 int() +def foo3(my_param=my_param): + pass +foo3("") + +some_default = '' +#? [] +def foo(my_t +#? [] +def foo(my_t, my_ty +#? ['some_default'] +def foo(my_t=some_defa +#? ['some_default'] +def foo(my_t=some_defa, my_t2=some_defa + +#? 
['my_type'] +def foo(my_t: lala=some_defa, my_t2: my_typ +#? ['my_type'] +def foo(my_t: lala=some_defa, my_t2: my_typ +#? [] +def foo(my_t: lala=some_defa, my_t + +#? [] +lambda my_t +#? [] +lambda my_, my_t +#? ['some_default'] +lambda x=some_defa +#? ['some_default'] +lambda y, x=some_defa + +# Just make sure we're not in some weird parsing recovery after opening brackets +def + +# ----------------- +# continuations +# ----------------- + +foo = \ +1 +#? int() +foo + +# ----------------- +# module attributes +# ----------------- + +# Don't move this to imports.py, because there's a star import. +#? str() +__file__ +#? ['__file__'] +__file__ + +#? str() +math.__file__ +# Should not lead to errors +#? +math() + +# ----------------- +# with statements +# ----------------- + +with open('') as f: + #? ['closed'] + f.closed + for line in f: + #? str() bytes() + line + +with open('') as f1, open('') as f2: + #? ['closed'] + f1.closed + #? ['closed'] + f2.closed + + +class Foo(): + def __enter__(self): + return '' + +#? 14 str() +with Foo() as f3: + #? str() + f3 +#! 14 ['with Foo() as f3: f3'] +with Foo() as f3: + f3 +#? 6 Foo +with Foo() as f3: + f3 + +# ----------------- +# Avoiding multiple definitions +# ----------------- + +some_array = ['', ''] +#! ['def upper'] +some_array[some_not_defined_index].upper diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/classes.py b/bundle/jedi-vim/pythonx/jedi/test/completion/classes.py new file mode 100644 index 000000000..9f1468b9f --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/classes.py @@ -0,0 +1,654 @@ +def find_class(): + """ This scope is special, because its in front of TestClass """ + #? ['ret'] + TestClass.ret + if 1: + #? ['ret'] + TestClass.ret + +class FindClass(): + #? [] + TestClass.ret + if a: + #? [] + TestClass.ret + + def find_class(self): + #? ['ret'] + TestClass.ret + if 1: + #? ['ret'] + TestClass.ret + +#? [] +FindClass().find_class.self +#? 
[] +FindClass().find_class.self.find_class + +# set variables, which should not be included, because they don't belong to the +# class +second = 1 +second = "" +class TestClass(object): + var_class = TestClass(1) + self.pseudo_var = 3 + + def __init__(self2, first_param, second_param, third=1.0): + self2.var_inst = first_param + self2.second = second_param + self2.first = first_param + self2.first.var_on_argument = 5 + a = 3 + + def var_func(self): + return 1 + + def get_first(self): + # traversal + self.second_new = self.second + return self.var_inst + + def values(self): + self.var_local = 3 + #? ['var_class', 'var_func', 'var_inst', 'var_local'] + self.var_ + #? + var_local + + def ret(self, a1): + # should not know any class functions! + #? [] + values + #? + values + #? ['return'] + ret + return a1 + +# should not work +#? [] +var_local +#? [] +var_inst +#? [] +var_func + +# instance +inst = TestClass(1) + +#? ['var_class', 'var_func', 'var_inst', 'var_local'] +inst.var + +#? ['var_class', 'var_func'] +TestClass.var + +#? int() +inst.var_local +#? [] +TestClass.var_local. +#? +TestClass.pseudo_var +#? +TestClass().pseudo_var + +#? int() +TestClass().ret(1) +# Should not return int(), because we want the type before `.ret(1)`. +#? 11 TestClass() +TestClass().ret(1) +#? int() +inst.ret(1) + +myclass = TestClass(1, '', 3.0) +#? int() +myclass.get_first() +#? [] +myclass.get_first.real + +# too many params +#? int() +TestClass(1,1,1).var_inst + +# too few params +#? int() +TestClass(1).first +#? [] +TestClass(1).second. + +# complicated variable settings in class +#? str() +myclass.second +#? str() +myclass.second_new + +# multiple classes / ordering +ints = TestClass(1, 1.0) +strs = TestClass("", '') +#? float() +ints.second +#? str() +strs.second + +#? ['var_class'] +TestClass.var_class.var_class.var_class.var_class + +# operations (+, *, etc) shouldn't be InstanceElements - #246 +class A(): + def __init__(self): + self.addition = 1 + 2 +#? 
int() +A().addition + +# should also work before `=` +#? 8 int() +A().addition = None +#? 8 int() +A(1).addition = None +#? 1 A +A(1).addition = None +a = A() +#? 8 int() +a.addition = None + + +# ----------------- +# inheritance +# ----------------- + +class Base(object): + def method_base(self): + return 1 + +class SuperClass(Base): + class_super = 3 + def __init__(self): + self.var_super = '' + def method_super(self): + self.var2_super = list + +class Mixin(SuperClass): + def method_mixin(self): + return int + +#? 20 SuperClass +class SubClass(SuperClass): + class_sub = 3 + def __init__(self): + self.var_sub = '' + def method_sub(self): + self.var_sub = list + return tuple + +instance = SubClass() + +#? ['method_base', 'method_sub', 'method_super'] +instance.method_ +#? ['var2_super', 'var_sub', 'var_super'] +instance.var +#? ['class_sub', 'class_super'] +instance.class_ + +#? ['method_base', 'method_sub', 'method_super'] +SubClass.method_ +#? [] +SubClass.var +#? ['class_sub', 'class_super'] +SubClass.class_ + +# ----------------- +# inheritance of builtins +# ----------------- + +class Base(str): + pass + +#? ['upper'] +Base.upper +#? ['upper'] +Base().upper + +# ----------------- +# dynamic inheritance +# ----------------- + +class Angry(object): + def shout(self): + return 'THIS IS MALARKEY!' + +def classgetter(): + return Angry + +class Dude(classgetter()): + def react(self): + #? ['shout'] + self.s + +# ----------------- +# multiple inheritance # 1071 +# ----------------- + +class FactorMixin(object): + FACTOR_1 = 0.1 + +class Calc(object): + def sum(self, a, b): + self.xxx = 3 + return a + b + +class BetterCalc(Calc, FactorMixin): + def multiply_factor(self, a): + return a * self.FACTOR_1 + +calc = BetterCalc() +#? ['sum'] +calc.sum +#? ['multiply_factor'] +calc.multip +#? ['FACTOR_1'] +calc.FACTOR_1 +#? ['xxx'] +calc.xxx + +# ----------------- +# __call__ +# ----------------- + +class CallClass(): + def __call__(self): + return 1 + +#? 
int() +CallClass()() + +# ----------------- +# variable assignments +# ----------------- + +class V: + def __init__(self, a): + self.a = a + + def ret(self): + return self.a + + d = b + b = ret + if 1: + c = b + +#? int() +V(1).b() +#? int() +V(1).c() +#? +V(1).d() +# Only keywords should be possible to complete. +#? ['is', 'in', 'not', 'and', 'or', 'if'] +V(1).d() + + +# ----------------- +# ordering +# ----------------- +class A(): + def b(self): + #? int() + a_func() + #? str() + self.a_func() + return a_func() + + def a_func(self): + return "" + +def a_func(): + return 1 + +#? int() +A().b() +#? str() +A().a_func() + +# ----------------- +# nested classes +# ----------------- +class A(): + class B(): + pass + def b(self): + return 1.0 + +#? float() +A().b() + +class A(): + def b(self): + class B(): + def b(self): + return [] + return B().b() + +#? list() +A().b() + +# ----------------- +# ducktyping +# ----------------- + +def meth(self): + return self.a, self.b + +class WithoutMethod(): + a = 1 + def __init__(self): + self.b = 1.0 + def blub(self): + return self.b + m = meth + +class B(): + b = '' + +a = WithoutMethod().m() +#? int() +a[0] +#? float() +a[1] + +#? float() +WithoutMethod.blub(WithoutMethod()) +#? str() +WithoutMethod.blub(B()) + +# ----------------- +# __getattr__ / getattr() / __getattribute__ +# ----------------- + +#? str().upper +getattr(str(), 'upper') +#? str.upper +getattr(str, 'upper') + +# some strange getattr calls +#? +getattr(str, 1) +#? +getattr() +#? +getattr(str) +#? +getattr(getattr, 1) +#? +getattr(str, []) + + +class Base(): + def ret(self, b): + return b + +class Wrapper(): + def __init__(self, obj): + self.obj = obj + + def __getattr__(self, name): + return getattr(self.obj, name) + +class Wrapper2(): + def __getattribute__(self, name): + return getattr(Base(), name) + +#? int() +Wrapper(Base()).ret(3) +#? ['ret'] +Wrapper(Base()).ret +#? int() +Wrapper(Wrapper(Base())).ret(3) +#? ['ret'] +Wrapper(Wrapper(Base())).ret + +#? 
int() +Wrapper2(Base()).ret(3) + +class GetattrArray(): + def __getattr__(self, name): + return [1] + +#? int() +GetattrArray().something[0] +#? [] +GetattrArray().something + +class WeirdGetattr: + class __getattr__(): + pass + +#? [] +WeirdGetattr().something + + +# ----------------- +# private vars +# ----------------- +class PrivateVar(): + def __init__(self): + self.__var = 1 + #? int() + self.__var + #? ['__var'] + self.__var + + def __private_func(self): + return 1 + + #? int() + __private_func() + + def wrap_private(self): + return self.__private_func() +#? [] +PrivateVar().__var +#? +PrivateVar().__var +#? [] +PrivateVar().__private_func +#? [] +PrivateVar.__private_func +#? int() +PrivateVar().wrap_private() + + +class PrivateSub(PrivateVar): + def test(self): + #? [] + self.__var + + def wrap_private(self): + #? [] + self.__var + +#? [] +PrivateSub().__var + +# ----------------- +# super +# ----------------- +class Super(object): + a = 3 + def return_sup(self): + return 1 +SuperCopy = Super + +class TestSuper(Super): + #? + super() + def test(self): + #? SuperCopy() + super() + #? ['a'] + super().a + if 1: + #? SuperCopy() + super() + def a(): + #? + super() + + def return_sup(self): + #? int() + return super().return_sup() + +#? int() +TestSuper().return_sup() + + +Super = 3 + +class Foo(): + def foo(self): + return 1 +# Somehow overwriting the same name caused problems (#1044) +class Foo(Foo): + def foo(self): + #? int() + super().foo() + +# ----------------- +# if flow at class level +# ----------------- +class TestX(object): + def normal_method(self): + return 1 + + if True: + def conditional_method(self): + var = self.normal_method() + #? int() + var + return 2 + + def other_method(self): + var = self.conditional_method() + #? int() + var + +# ----------------- +# mro method +# ----------------- + +class A(object): + a = 3 + +#? ['mro'] +A.mro +#? 
[] +A().mro + + +# ----------------- +# mro resolution +# ----------------- + +class B(A()): + b = 3 + +#? +B.a +#? +B().a +#? int() +B.b +#? int() +B().b + + +# ----------------- +# With import +# ----------------- + +from import_tree.classes import Config2, BaseClass + +class Config(BaseClass): + """#884""" + +#? Config2() +Config.mode + +#? int() +Config.mode2 + + +# ----------------- +# Nested class/def/class +# ----------------- +class Foo(object): + a = 3 + def create_class(self): + class X(): + a = self.a + self.b = 3.0 + return X + +#? int() +Foo().create_class().a +#? float() +Foo().b + +class Foo(object): + def comprehension_definition(self): + return [1 for self.b in [1]] + +#? int() +Foo().b + +# ----------------- +# default arguments +# ----------------- + +default = '' +class DefaultArg(): + default = 3 + def x(self, arg=default): + #? str() + default + return arg + def y(self): + return default + +#? int() +DefaultArg().x() +#? str() +DefaultArg().y() +#? int() +DefaultArg.x() +#? str() +DefaultArg.y() + + +# ----------------- +# Error Recovery +# ----------------- + +from import_tree.pkg.base import MyBase + +class C1(MyBase): + def f3(self): + #! 13 ['def f1'] + self.f1() . # hey''' + #? 13 MyBase.f1 + self.f1() . # hey''' + +# ----------------- +# With a very weird __init__ +# ----------------- + +class WithWeirdInit: + class __init__: + def __init__(self, a): + self.a = a + + def y(self): + return self.a + + +#? +WithWeirdInit(1).y() diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/completion.py b/bundle/jedi-vim/pythonx/jedi/test/completion/completion.py new file mode 100644 index 000000000..f509a19c7 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/completion.py @@ -0,0 +1,50 @@ +""" +Special cases of completions (typically special positions that caused issues +with value parsing. +""" + +def pass_decorator(func): + return func + + +def x(): + return ( + 1, +#? 
["tuple"] +tuple + ) + + # Comment just somewhere + + +class MyClass: + @pass_decorator + def x(foo, +#? 5 [] +tuple, + ): + return 1 + + +if x: + pass +#? ['else'] +else + +try: + pass +#? ['except', 'Exception'] +except + +try: + pass +#? 6 ['except', 'Exception'] +except AttributeError: + pass +#? ['finally'] +finally + +for x in y: + pass +#? ['else'] +else diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/complex.py b/bundle/jedi-vim/pythonx/jedi/test/completion/complex.py new file mode 100644 index 000000000..e8327f8e0 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/complex.py @@ -0,0 +1,37 @@ +""" Mostly for stupid error reports of @dbrgn. :-) """ + +import time + +class Foo(object): + global time + asdf = time + +def asdfy(): + return Foo + +xorz = getattr(asdfy()(), 'asdf') +#? time +xorz + + + +def args_returner(*args): + return args + + +#? tuple() +args_returner(1)[:] +#? int() +args_returner(1)[:][0] + + +def kwargs_returner(**kwargs): + return kwargs + + +# TODO This is not really correct, needs correction probably at some point, but +# at least it doesn't raise an error. +#? int() +kwargs_returner(a=1)[:] +#? +kwargs_returner(b=1)[:][0] diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/comprehensions.py b/bundle/jedi-vim/pythonx/jedi/test/completion/comprehensions.py new file mode 100644 index 000000000..e0d4054c6 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/comprehensions.py @@ -0,0 +1,260 @@ +# ----------------- +# list comprehensions +# ----------------- + +# basics: + +a = ['' for a in [1]] +#? str() +a[0] +#? ['insert'] +a.insert + +a = [a for a in [1]] +#? int() +a[0] + +y = 1.0 +# Should not leak. +[y for y in [3]] +#? float() +y + +a = [a for a in (1, 2)] +#? int() +a[0] + +a = [a for a,b in [(1,'')]] +#? int() +a[0] +a = [a for (a,b) in [(1,'')]] +#? int() +a[0] + +arr = [1,''] +a = [a for a in arr] +#? int() +a[0] +#? str() +a[1] +#? 
int() str() +a[2] + +a = [a if 1.0 else '' for a in [1] if [1.0]] +#? int() str() +a[0] + +# name resolve should be correct +left, right = 'a', 'b' +left, right = [x for x in (left, right)] +#? str() +left + +# with a dict literal +#? int() +[a for a in {1:'x'}][0] + +# list comprehensions should also work in combination with functions +def _listen(arg): + for x in arg: + #? str() + x + +_listen(['' for x in [1]]) +#? +([str for x in []])[0] + +# ----------------- +# nested list comprehensions +# ----------------- + +b = [a for arr in [[1, 1.0]] for a in arr] +#? int() +b[0] +#? float() +b[1] + +b = [arr for arr in [[1, 1.0]] for a in arr] +#? int() +b[0][0] +#? float() +b[1][1] + +b = [a for arr in [[1]] if '' for a in arr if ''] +#? int() +b[0] + +b = [b for arr in [[[1.0]]] for a in arr for b in a] +#? float() +b[0] + +#? str() +[x for x in 'chr'][0] + +# From GitHub #26 +#? list() +a = [[int(v) for v in line.strip().split() if v] for line in ["123", str(), "123"] if line] +#? list() +a[0] +#? int() +a[0][0] + +# From GitHub #1524 +#? +[nothing for nothing, _ in [1]][0] + +# ----------------- +# generator comprehensions +# ----------------- + +left, right = (i for i in (1, '')) + +#? int() +left +#? str() +right + +gen = (i for i in (1,)) + +#? int() +next(gen) +#? +gen[0] + +gen = (a for arr in [[1.0]] for a in arr) +#? float() +next(gen) + +#? int() +(i for i in (1,)).send() + +# issues with different formats +left, right = (i for i in + ('1', 2)) +#? str() +left +#? int() +right + +# ----------------- +# name resolution in comprehensions. +# ----------------- + +def x(): + """Should not try to resolve to the if hio, which was a bug.""" + #? 22 + [a for a in h if hio] + if hio: pass + +# ----------------- +# slices +# ----------------- + +#? list() +foo = [x for x in [1, '']][:1] +#? int() +foo[0] +#? 
str() +foo[1] + +# ----------------- +# In class +# ----------------- + +class X(): + def __init__(self, bar): + self.bar = bar + + def foo(self): + x = [a for a in self.bar][0] + #? int() + x + return x + +#? int() +X([1]).foo() + +# ----------------- +# dict comprehensions +# ----------------- + +#? int() +list({a - 1: 3 for a in [1]})[0] + +d = {a - 1: b for a, b in {1: 'a', 3: 1.0}.items()} +#? int() +list(d)[0] +#? str() float() +d.values()[0] +#? str() +d[0] +#? float() str() +d[1] +#? float() +d[2] + +# ----------------- +# set comprehensions +# ----------------- + +#? set() +{a - 1 for a in [1]} + +#? set() +{a for a in range(10)} + +#? int() +[x for x in {a for a in range(10)}][0] + +#? int() +{a for a in range(10)}.pop() +#? float() str() +{b for a in [[3.0], ['']] for b in a}.pop() + +#? int() +next(iter({a for a in range(10)})) + + +#? int() +[a for a in {1, 2, 3}][0] + +# ----------------- +# syntax errors +# ----------------- + +# Issue #1146 + +#? ['list'] +[int(str(x.value) for x in list + +def reset_missing_bracket(): pass + + +# ----------------- +# function calls +# ----------------- + +def foo(arg): + return arg + + +x = foo(x for x in [1]) + +#? int() +next(x) +#? +x[0] + +# While it's illegal to have more than one argument, when a generator +# expression is involved, it's still a valid parse tree and Jedi should still +# work (and especially not raise Exceptions). It's debatable wheter inferring +# values for invalid statements is a good idea, but not failing is a must. + +#? int() +next(foo(x for x in [1], 1)) + +def bar(x, y): + return y + +#? 
str() +next(bar(x for x in [1], x for x in [''])) diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/conftest.py b/bundle/jedi-vim/pythonx/jedi/test/completion/conftest.py new file mode 100644 index 000000000..c7c14978f --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/conftest.py @@ -0,0 +1,29 @@ +# Exists only for completion/pytest.py + +import pytest + + +@pytest.fixture() +def my_other_conftest_fixture(): + return 1.0 + + +@pytest.fixture() +def my_conftest_fixture(my_other_conftest_fixture): + return my_other_conftest_fixture + + +def my_not_existing_fixture(): + return 3 # Just a normal function + + +@pytest.fixture() +def inheritance_fixture(): + return '' + + +@pytest.fixture +def testdir(testdir): + #? ['chdir'] + testdir.chdir + return testdir diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/context.py b/bundle/jedi-vim/pythonx/jedi/test/completion/context.py new file mode 100644 index 000000000..d77c79c39 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/context.py @@ -0,0 +1,55 @@ +class Base(): + myfoobar = 3 + + +class X(Base): + def func(self, foo): + pass + + +class Y(X): + def actual_function(self): + pass + + #? [] + def actual_function + #? ['func'] + def f + + #? ['__doc__'] + __doc__ + #? [] + def __doc__ + + # This might or might not be what we wanted, currently properties are also + # used like this. IMO this is not wanted ~dave. + #? ['__class__'] + def __class__ + #? [] + __class__ + + + #? ['__repr__'] + def __repr__ + + #? [] + def mro + + #? ['myfoobar'] + myfoobar + +#? [] +myfoobar + +# ----------------- +# Inheritance +# ----------------- + +class Super(): + enabled = True + if enabled: + yo_dude = 4 + +class Sub(Super): + #? 
['yo_dude'] + yo_dud diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/decorators.py b/bundle/jedi-vim/pythonx/jedi/test/completion/decorators.py new file mode 100644 index 000000000..7fce91da7 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/decorators.py @@ -0,0 +1,345 @@ +# ----------------- +# normal decorators +# ----------------- + +def decorator(func): + def wrapper(*args): + return func(1, *args) + return wrapper + +@decorator +def decorated(a,b): + return a,b + +exe = decorated(set, '') + +#? set +exe[1] + +#? int() +exe[0] + +# more complicated with args/kwargs +def dec(func): + def wrapper(*args, **kwargs): + return func(*args, **kwargs) + return wrapper + +@dec +def fu(a, b, c, *args, **kwargs): + return a, b, c, args, kwargs + +exe = fu(list, c=set, b=3, d='') + +#? list +exe[0] +#? int() +exe[1] +#? set +exe[2] +#? [] +exe[3][0]. +#? str() +exe[4]['d'] + + +exe = fu(list, set, 3, '', d='') + +#? str() +exe[3][0] + +# ----------------- +# multiple decorators +# ----------------- +def dec2(func2): + def wrapper2(first_arg, *args2, **kwargs2): + return func2(first_arg, *args2, **kwargs2) + return wrapper2 + +@dec2 +@dec +def fu2(a, b, c, *args, **kwargs): + return a, b, c, args, kwargs + +exe = fu2(list, c=set, b=3, d='str') + +#? list +exe[0] +#? int() +exe[1] +#? set +exe[2] +#? [] +exe[3][0]. +#? str() +exe[4]['d'] + + +# ----------------- +# Decorator is a class +# ----------------- +def same_func(func): + return func + +class Decorator(object): + def __init__(self, func): + self.func = func + + def __call__(self, *args, **kwargs): + return self.func(1, *args, **kwargs) + +@Decorator +def nothing(a,b,c): + return a,b,c + +#? int() +nothing("")[0] +#? str() +nothing("")[1] + + +@same_func +@Decorator +def nothing(a,b,c): + return a,b,c + +#? 
int() +nothing("")[0] + +class MethodDecoratorAsClass(): + class_var = 3 + @Decorator + def func_without_self(arg, arg2): + return arg, arg2 + + @Decorator + def func_with_self(self, arg): + return self.class_var + +#? int() +MethodDecoratorAsClass().func_without_self('')[0] +#? str() +MethodDecoratorAsClass().func_without_self('')[1] +#? +MethodDecoratorAsClass().func_with_self(1) + + +class SelfVars(): + """Init decorator problem as an instance, #247""" + @Decorator + def __init__(self): + """ + __init__ decorators should be ignored when looking up variables in the + class. + """ + self.c = list + + @Decorator + def shouldnt_expose_var(not_self): + """ + Even though in real Python this shouldn't expose the variable, in this + case Jedi exposes the variable, because these kind of decorators are + normally descriptors, which SHOULD be exposed (at least 90%). + """ + not_self.b = 1.0 + + def other_method(self): + #? float() + self.b + #? list + self.c + +# ----------------- +# not found decorators (are just ignored) +# ----------------- +@not_found_decorator +def just_a_func(): + return 1 + +#? int() +just_a_func() + +#? ['__closure__'] +just_a_func.__closure__ + + +class JustAClass: + @not_found_decorator2 + def a(self): + return 1 + +#? ['__call__'] +JustAClass().a.__call__ +#? int() +JustAClass().a() +#? ['__call__'] +JustAClass.a.__call__ +#? int() +JustAClass.a() + +# ----------------- +# illegal decorators +# ----------------- + +class DecoratorWithoutCall(): + def __init__(self, func): + self.func = func + +@DecoratorWithoutCall +def f(): + return 1 + +# cannot be resolved - should be ignored +@DecoratorWithoutCall(None) +def g(): + return 1 + +#? +f() +#? int() +g() + + +class X(): + @str + def x(self): + pass + + def y(self): + #? str() + self.x + #? + self.x() + + +def decorator_var_args(function, *args): + return function(*args) + +@decorator_var_args +def function_var_args(param): + return param + +#? 
int() +function_var_args(1) + +# ----------------- +# method decorators +# ----------------- + +def dec(f): + def wrapper(s): + return f(s) + return wrapper + +class MethodDecorators(): + _class_var = 1 + def __init__(self): + self._method_var = '' + + @dec + def constant(self): + return 1.0 + + @dec + def class_var(self): + return self._class_var + + @dec + def method_var(self): + return self._method_var + +#? float() +MethodDecorators().constant() +#? int() +MethodDecorators().class_var() +#? str() +MethodDecorators().method_var() + + +class Base(): + @not_existing + def __init__(self): + pass + @not_existing + def b(self): + return '' + @dec + def c(self): + return 1 + +class MethodDecoratorDoesntExist(Base): + """#272 github: combination of method decorators and super()""" + def a(self): + #? + super().__init__() + #? str() + super().b() + #? int() + super().c() + #? float() + self.d() + + @doesnt_exist + def d(self): + return 1.0 + +# ----------------- +# others +# ----------------- +def memoize(function): + def wrapper(*args): + if random.choice([0, 1]): + pass + else: + rv = function(*args) + return rv + return wrapper + +@memoize +def follow_statement(stmt): + return stmt + +# here we had problems with the else clause, because the parent was not right. +#? int() +follow_statement(1) + +# ----------------- +# class decorators +# ----------------- + +# class decorators should just be ignored +@should_ignore +class A(): + x = 3 + def ret(self): + return 1 + +#? int() +A().ret() +#? int() +A().x + + +# ----------------- +# On decorator completions +# ----------------- + +import abc +#? ['abc'] +@abc + +#? ['abstractmethod'] +@abc.abstractmethod + +# ----------------- +# Goto +# ----------------- +x = 1 + +#! 5 [] +@x.foo() +def f(): pass + +#! 
1 ['x = 1'] +@x.foo() +def f(): pass diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/descriptors.py b/bundle/jedi-vim/pythonx/jedi/test/completion/descriptors.py new file mode 100644 index 000000000..3cc01faff --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/descriptors.py @@ -0,0 +1,221 @@ +class RevealAccess(object): + """ + A data descriptor that sets and returns values + normally and prints a message logging their access. + """ + def __init__(self, initval=None, name='var'): + self.val = initval + self.name = name + + def __get__(self, obj, objtype): + print('Retrieving', self.name) + return self.val + + def __set__(self, obj, val): + print('Updating', self.name) + self.val = val + + def just_a_method(self): + pass + +class C(object): + x = RevealAccess(10, 'var "x"') + #? RevealAccess() + x + #? ['just_a_method'] + x.just_a_method + y = 5.0 + def __init__(self): + #? int() + self.x + + #? [] + self.just_a_method + #? [] + C.just_a_method + +m = C() +#? int() +m.x +#? float() +m.y +#? int() +C.x + +#? [] +m.just_a_method +#? [] +C.just_a_method + +# ----------------- +# properties +# ----------------- +class B(): + @property + def r(self): + return 1 + @r.setter + def r(self, value): + return '' + def t(self): + return '' + p = property(t) + +#? [] +B().r(). +#? int() +B().r + +#? str() +B().p +#? [] +B().p(). + +class PropClass(): + def __init__(self, a): + self.a = a + @property + def ret(self): + return self.a + + @ret.setter + def ret(self, value): + return 1.0 + + def ret2(self): + return self.a + ret2 = property(ret2) + + @property + def nested(self): + """ causes recusions in properties, should work """ + return self.ret + + @property + def nested2(self): + """ causes recusions in properties, should not work """ + return self.nested2 + + @property + def join1(self): + """ mutual recusion """ + return self.join2 + + @property + def join2(self): + """ mutual recusion """ + return self.join1 + +#? str() +PropClass("").ret +#? 
[] +PropClass().ret. + +#? str() +PropClass("").ret2 +#? +PropClass().ret2 + +#? int() +PropClass(1).nested +#? [] +PropClass().nested. + +#? +PropClass(1).nested2 +#? [] +PropClass().nested2. + +#? +PropClass(1).join1 +# ----------------- +# staticmethod/classmethod +# ----------------- + +class E(object): + a = '' + def __init__(self, a): + self.a = a + + def f(x): + return x + f = staticmethod(f) + #? + f.__func + + @staticmethod + def g(x): + return x + + def s(cls, x): + return x + s = classmethod(s) + + @classmethod + def t(cls, x): + return x + + @classmethod + def u(cls, x): + return cls.a + +e = E(1) +#? int() +e.f(1) +#? int() +E.f(1) +#? int() +e.g(1) +#? int() +E.g(1) + +#? int() +e.s(1) +#? int() +E.s(1) +#? int() +e.t(1) +#? int() +E.t(1) + +#? str() +e.u(1) +#? str() +E.u(1) + +# ----------------- +# Conditions +# ----------------- + +from functools import partial + + +class Memoize(): + def __init__(self, func): + self.func = func + + def __get__(self, obj, objtype): + if obj is None: + return self.func + + return partial(self, obj) + + def __call__(self, *args, **kwargs): + # We don't do caching here, but that's what would normally happen. + return self.func(*args, **kwargs) + + +class MemoizeTest(): + def __init__(self, x): + self.x = x + + @Memoize + def some_func(self): + return self.x + + +#? int() +MemoizeTest(10).some_func() +# Now also call the same function over the class (see if clause above). +#? 
float() +MemoizeTest.some_func(MemoizeTest(10.0)) diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/django.py b/bundle/jedi-vim/pythonx/jedi/test/completion/django.py new file mode 100644 index 000000000..533cab6e0 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/django.py @@ -0,0 +1,292 @@ +import datetime +import decimal +import uuid + +from django.db import models +from django.contrib.auth.models import User +from django.db.models.query_utils import DeferredAttribute + + +class TagManager(models.Manager): + def specially_filtered_tags(self): + return self.all() + + +class Tag(models.Model): + tag_name = models.CharField() + + objects = TagManager() + + custom_objects = TagManager() + + +class Category(models.Model): + category_name = models.CharField() + + +class AttachedData(models.Model): + extra_data = models.TextField() + + +class BusinessModel(models.Model): + attached_o2o = models.OneToOneField(AttachedData) + + category_fk = models.ForeignKey(Category) + category_fk2 = models.ForeignKey('Category') + category_fk3 = models.ForeignKey(1) + category_fk4 = models.ForeignKey('models') + category_fk5 = models.ForeignKey() + + integer_field = models.IntegerField() + big_integer_field = models.BigIntegerField() + positive_integer_field = models.PositiveIntegerField() + small_integer_field = models.SmallIntegerField() + char_field = models.CharField() + text_field = models.TextField() + email_field = models.EmailField() + ip_address_field = models.GenericIPAddressField() + url_field = models.URLField() + float_field = models.FloatField() + binary_field = models.BinaryField() + boolean_field = models.BooleanField() + decimal_field = models.DecimalField() + time_field = models.TimeField() + duration_field = models.DurationField() + date_field = models.DateField() + date_time_field = models.DateTimeField() + uuid_field = models.UUIDField() + tags_m2m = models.ManyToManyField(Tag) + + unidentifiable = NOT_FOUND + + #? 
models.IntegerField() + integer_field + + def method(self): + return 42 + +# ----------------- +# Model attribute inference +# ----------------- + +#? DeferredAttribute() +BusinessModel.integer_field +#? DeferredAttribute() +BusinessModel.tags_m2m +#? DeferredAttribute() +BusinessModel.email_field + +model_instance = BusinessModel() + +#? int() +model_instance.integer_field +#? int() +model_instance.big_integer_field +#? int() +model_instance.positive_integer_field +#? int() +model_instance.small_integer_field +#? str() +model_instance.char_field +#? str() +model_instance.text_field +#? str() +model_instance.email_field +#? str() +model_instance.ip_address_field +#? str() +model_instance.url_field +#? float() +model_instance.float_field +#? bytes() +model_instance.binary_field +#? bool() +model_instance.boolean_field +#? decimal.Decimal() +model_instance.decimal_field +#? datetime.time() +model_instance.time_field +#? datetime.timedelta() +model_instance.duration_field +#? datetime.date() +model_instance.date_field +#? datetime.datetime() +model_instance.date_time_field +#? uuid.UUID() +model_instance.uuid_field + +#! ['attached_o2o = models.OneToOneField(AttachedData)'] +model_instance.attached_o2o +#! ['extra_data = models.TextField()'] +model_instance.attached_o2o.extra_data +#? AttachedData() +model_instance.attached_o2o +#? str() +model_instance.attached_o2o.extra_data + +#! ['category_fk = models.ForeignKey(Category)'] +model_instance.category_fk +#! ['category_name = models.CharField()'] +model_instance.category_fk.category_name +#? Category() +model_instance.category_fk +#? str() +model_instance.category_fk.category_name +#? Category() +model_instance.category_fk2 +#? str() +model_instance.category_fk2.category_name +#? +model_instance.category_fk3 +#? +model_instance.category_fk4 +#? +model_instance.category_fk5 + +#? models.manager.RelatedManager() +model_instance.tags_m2m +#? Tag() +model_instance.tags_m2m.get() +#? 
['add'] +model_instance.tags_m2m.add + +#? +model_instance.unidentifiable +#! ['unidentifiable = NOT_FOUND'] +model_instance.unidentifiable + +#? int() +model_instance.method() +#! ['def method'] +model_instance.method + +# ----------------- +# Queries +# ----------------- + +#? ['objects'] +model_instance.object +#? +model_instance.objects +#? +model_instance.objects.filter +#? models.query.QuerySet.filter +BusinessModel.objects.filter +#? BusinessModel() None +BusinessModel.objects.filter().first() +#? str() +BusinessModel.objects.get().char_field +#? int() +BusinessModel.objects.update(x='') +#? BusinessModel() +BusinessModel.objects.create() + +# ----------------- +# Custom object manager +# ----------------- + +#? TagManager() +Tag.objects +#? Tag() None +Tag.objects.filter().first() + +#? TagManager() +Tag.custom_objects +#? Tag() None +Tag.custom_objects.filter().first() + +# ----------------- +# Inheritance +# ----------------- + +class Inherited(BusinessModel): + text_field = models.IntegerField() + new_field = models.FloatField() + +inherited = Inherited() +#? int() +inherited.text_field +#? str() +inherited.char_field +#? float() +inherited.new_field + +#? +Inherited.category_fk2.category_name +#? str() +inherited.category_fk2.category_name +#? str() +Inherited.objects.get().char_field +#? int() +Inherited.objects.get().text_field +#? float() +Inherited.objects.get().new_field + +# ----------------- +# Model methods +# ----------------- + +#? ['from_db'] +Inherited.from_db +#? ['validate_unique'] +Inherited.validate_uniqu +#? ['validate_unique'] +Inherited().validate_unique + +# ----------------- +# Django Auth +# ----------------- + +#? str() +User().email +#? str() +User.objects.get().email + +# ----------------- +# values & values_list (dave is too lazy to implement it) +# ----------------- + +#? +BusinessModel.objects.values_list('char_field')[0] +#? dict() +BusinessModel.objects.values('char_field')[0] +#? 
+BusinessModel.objects.values('char_field')[0]['char_field'] + +# ----------------- +# Completion +# ----------------- + +#? 19 ['text_field='] +Inherited(text_fiel) +#? 18 ['new_field='] +Inherited(new_fiel) +#? 19 ['char_field='] +Inherited(char_fiel) +#? 19 ['email_field='] +Inherited(email_fie) +#? 19 [] +Inherited(unidentif) +#? 21 ['category_fk=', 'category_fk2=', 'category_fk3=', 'category_fk4=', 'category_fk5='] +Inherited(category_fk) +#? 21 ['attached_o2o='] +Inherited(attached_o2) +#? 18 ['tags_m2m='] +Inherited(tags_m2m) + +#? 32 ['tags_m2m='] +Inherited.objects.create(tags_m2) +#? 32 ['tags_m2m='] +Inherited.objects.filter(tags_m2) +#? 35 ['char_field='] +Inherited.objects.exclude(char_fiel) +#? 34 ['char_field='] +Inherited.objects.update(char_fiel) +#? 32 ['email_field='] +Inherited.objects.get(email_fiel) +#? 44 ['category_fk2='] +Inherited.objects.get_or_create(category_fk2) +#? 44 ['uuid_field='] +Inherited.objects.update_or_create(uuid_fiel) +#? 48 ['char_field='] +Inherited.objects.exclude(pk=3).filter(char_fiel) diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/docstring.py b/bundle/jedi-vim/pythonx/jedi/test/completion/docstring.py new file mode 100644 index 000000000..b2d02239f --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/docstring.py @@ -0,0 +1,323 @@ +""" Test docstrings in functions and classes, which are used to infer types """ + +# ----------------- +# sphinx style +# ----------------- +def sphinxy(a, b, c, d, x): + """ asdfasdf + :param a: blablabla + :type a: str + :type b: (str, int) + :type c: random.Random + :type d: :class:`random.Random` + :param str x: blablabla + :rtype: dict + """ + #? str() + a + #? str() + b[0] + #? int() + b[1] + #? ['seed'] + c.seed + #? ['seed'] + d.seed + #? ['lower'] + x.lower + +#? 
dict() +sphinxy() + +# wrong declarations +def sphinxy2(a, b, x, y, z): + """ + :param a: Forgot type declaration + :type a: + :param b: Just something + :type b: `` + :param x: Just something without type + :param y: A function + :type y: def l(): pass + :param z: A keyword + :type z: return + :rtype: + """ + #? + a + #? + b + #? + x + #? + y + #? + z + +#? +sphinxy2() + + +def sphinxy_param_type_wrapped(a): + """ + :param str a: + Some description wrapped onto the next line with no space after the + colon. + """ + #? str() + a + + +# local classes -> github #370 +class ProgramNode(): + pass + +def local_classes(node, node2): + """ + :type node: ProgramNode + ... and the class definition after this func definition: + :type node2: ProgramNode2 + """ + #? ProgramNode() + node + #? ProgramNode2() + node2 + +class ProgramNode2(): + pass + + +def list_with_non_imports(lst): + """ + Should be able to work with tuples and lists and still import stuff. + + :type lst: (random.Random, [collections.defaultdict, ...]) + """ + #? ['seed'] + lst[0].seed + + import collections as col + # use some weird index + #? col.defaultdict() + lst[1][10] + + +def two_dots(a): + """ + :type a: json.decoder.JSONDecoder + """ + #? ['raw_decode'] + a.raw_decode + + +# sphinx returns +def return_module_object(): + """ + :rtype: :class:`random.Random` + """ + +#? ['seed'] +return_module_object().seed + + +# ----------------- +# epydoc style +# ----------------- +def epydoc(a, b): + """ asdfasdf + @type a: str + @param a: blablabla + @type b: (str, int) + @param b: blablah + @rtype: list + """ + #? str() + a + #? str() + b[0] + + #? int() + b[1] + +#? list() +epydoc() + + +# Returns with param type only +def rparam(a,b): + """ + @type a: str + """ + return a + +#? str() +rparam() + + +# Composite types +def composite(): + """ + @rtype: (str, int, dict) + """ + +x, y, z = composite() +#? str() +x +#? int() +y +#? 
dict() +z + + +# Both docstring and calculated return type +def both(): + """ + @rtype: str + """ + return 23 + +#? str() int() +both() + +class Test(object): + def __init__(self): + self.teststr = "" + """ + # jedi issue #210 + """ + def test(self): + #? ['teststr'] + self.teststr + +# ----------------- +# statement docstrings +# ----------------- +d = '' +""" bsdf """ +#? str() +d.upper() + +# ----------------- +# class docstrings +# ----------------- + +class InInit(): + def __init__(self, foo): + """ + :type foo: str + """ + #? str() + foo + + +class InClass(): + """ + :type foo: str + """ + def __init__(self, foo): + #? str() + foo + + +class InBoth(): + """ + :type foo: str + """ + def __init__(self, foo): + """ + :type foo: int + """ + #? str() int() + foo + + +def __init__(foo): + """ + :type foo: str + """ + #? str() + foo + + +# ----------------- +# Renamed imports (#507) +# ----------------- + +import datetime +from datetime import datetime as datetime_imported + +def import_issues(foo): + """ + @type foo: datetime_imported + """ + #? datetime.datetime() + foo + + +# ----------------- +# Doctest completions +# ----------------- + +def doctest_with_gt(): + """ + x + + >>> somewhere_in_docstring = 3 + #? ['import_issues'] + >>> import_issu + #? ['somewhere_in_docstring'] + >>> somewhere_ + + blabla + + >>> haha = 3 + #? ['haha'] + >>> hah + #? ['doctest_with_space'] + >>> doctest_with_sp + """ + +def doctest_with_space(): + """ + x + #? ['import_issues'] + import_issu + """ + +def doctest_issue_github_1748(): + """From GitHub #1748 + #? 10 [] + This. Al + """ + pass + + +def docstring_rst_identifiers(): + """ + #? 30 ['import_issues'] + hello I'm here `import_iss` blabla + + #? ['import_issues'] + hello I'm here `import_iss + + #? [] + hello I'm here import_iss + #? [] + hello I'm here ` import_iss + + #? ['upper'] + hello I'm here `str.upp + """ + + +def doctest_without_ending(): + """ + #? [] + import_issu + ha + + no_ending = False + #? 
['import_issues'] + import_issu + #? ['no_ending'] + no_endin diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/dynamic_arrays.py b/bundle/jedi-vim/pythonx/jedi/test/completion/dynamic_arrays.py new file mode 100644 index 000000000..61cc3839e --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/dynamic_arrays.py @@ -0,0 +1,397 @@ +""" +Checking for ``list.append`` and all the other possible array modifications. +""" +# ----------------- +# list.append +# ----------------- +arr = [] +for a in [1,2]: + arr.append(a); + +arr.append # should not cause an exception +arr.append() # should not cause an exception + +#? int() +arr[10] + +arr = [tuple()] +for a in [1,2]: + arr.append(a); + +#? int() tuple() +arr[10] +#? int() +arr[10].index() + +arr = list([]) +arr.append(1) +#? int() +arr[0] + +# ----------------- +# list.insert +# ----------------- +arr = [""] +arr.insert(0, 1.0) + +# on exception due to this, please! +arr.insert(0) +arr.insert() + +#? float() str() +arr[10] + +for a in arr: + #? float() str() + a + +#? float() str() +list(arr)[10] + +# ----------------- +# list.extend / set.update +# ----------------- + +arr = [1.0] +arr.extend([1,2,3]) +arr.extend([]) +arr.extend("") +arr.extend(list) # should ignore + +#? float() int() str() +arr[100] + +a = set(arr) +a.update(list(["", 1])) + +#? float() int() str() +list(a)[0] +# ----------------- +# set/list initialized as functions +# ----------------- + +st = set() +st.add(1) + +#? int() +for s in st: s + +lst = list() +lst.append(1) + +#? int() +for i in lst: i + +# ----------------- +# renames / type changes +# ----------------- +arr = [] +arr2 = arr +arr2.append('') +#? str() +arr2[0] + + +lst = [1] +lst.append(1.0) +s = set(lst) +s.add("ahh") +lst = list(s) +lst.append({}) + +#? dict() int() float() str() +lst[0] + +# should work with tuple conversion, too. +#? dict() int() float() str() +tuple(lst)[0] + +# but not with an iterator +#? 
+iter(lst)[0] + +# ----------------- +# complex including += +# ----------------- +class C(): pass +class D(): pass +class E(): pass +lst = [1] +lst.append(1.0) +lst += [C()] +s = set(lst) +s.add("") +s += [D()] +lst = list(s) +lst.append({}) +lst += [E()] + +#? dict() int() float() str() C() D() E() +lst[0] + +# ----------------- +# functions +# ----------------- + +def arr_append(arr4, a): + arr4.append(a) + +def add_to_arr(arr2, a): + arr2.append(a) + return arr2 + +def app(a): + arr3.append(a) + +arr3 = [1.0] +res = add_to_arr(arr3, 1) +arr_append(arr3, 'str') +app(set()) + +#? float() str() int() set() +arr3[10] + +#? float() str() int() set() +res[10] + +# ----------------- +# returns, special because the module dicts are not correct here. +# ----------------- +def blub(): + a = [] + a.append(1.0) + #? float() + a[0] + return a + +#? float() +blub()[0] + +# list with default +def blub(): + a = list([1]) + a.append(1.0) + return a + +#? int() float() +blub()[0] + +# empty list +def blub(): + a = list() + a.append(1.0) + return a +#? float() +blub()[0] + +# with if +def blub(): + if 1: + a = [] + a.append(1.0) + return a + +#? float() +blub()[0] + +# with else clause +def blub(): + if random.choice([0, 1]): + 1 + else: + a = [] + a.append(1) + return a + +#? int() +blub()[0] +# ----------------- +# returns, the same for classes +# ----------------- +class C(): + def blub(self, b): + if 1: + a = [] + a.append(b) + return a + + def blub2(self): + """ mapper function """ + a = self.blub(1.0) + #? float() + a[0] + return a + + def literal_arr(self, el): + self.a = [] + self.a.append(el) + #? int() + self.a[0] + return self.a + + def list_arr(self, el): + self.b = list([]) + self.b.append(el) + #? float() + self.b[0] + return self.b + +#? int() +C().blub(1)[0] +#? float() +C().blub2(1)[0] + +#? int() +C().a[0] +#? int() +C().literal_arr(1)[0] + +#? float() +C().b[0] +#? 
float() +C().list_arr(1.0)[0] + +# ----------------- +# array recursions +# ----------------- + +a = set([1.0]) +a.update(a) +a.update([1]) + +#? float() int() +list(a)[0] + +def first(a): + b = [] + b.append(a) + b.extend(second(a)) + return list(b) + +def second(a): + b = [] + b.extend(first(a)) + return list(b) + +#? float() +first(1.0)[0] + +def third(): + b = [] + b.extend + extend() + b.extend(first()) + return list(b) +#? +third()[0] + + +# ----------------- +# set.add +# ----------------- +st = {1.0} +for a in [1,2]: + st.add(a) + +st.append('') # lists should not have an influence + +st.add # should not cause an exception +st.add() + +st = {1.0} +st.add(1) +lst = list(st) + +lst.append('') + +#? float() int() str() +lst[0] + +# ----------------- +# list setitem +# ----------------- + +some_lst = [int] +some_lst[3] = str +#? int +some_lst[0] +#? str +some_lst[3] +#? int str +some_lst[2] + +some_lst[0] = tuple +#? tuple +some_lst[0] +#? int str tuple +some_lst[1] + +some_lst2 = list([1]) +some_lst2[3] = '' +#? int() str() +some_lst2[0] +#? str() +some_lst2[3] +#? int() str() +some_lst2[2] + +some_lst3 = [] +some_lst3[0] = 3 +some_lst3[:] = '' # Is ignored for now. +#? int() +some_lst3[0] +# ----------------- +# set setitem/other modifications (should not work) +# ----------------- + +some_set = {int} +some_set[3] = str +#? int +some_set[0] +#? int +some_set[3] + +something = object() +something[3] = str +#? +something[0] +#? +something[3] + +# ----------------- +# dict setitem +# ----------------- + +some_dct = {'a': float, 1: int} +some_dct['x'] = list +some_dct['y'] = tuple +#? list +some_dct['x'] +#? int float list tuple +some_dct['unknown'] +#? float +some_dct['a'] + +some_dct = dict({'a': 1, 1: ''}) +#? int() str() +some_dct['la'] +#? int() +some_dct['a'] + +some_dct['x'] = list +some_dct['y'] = tuple +#? list +some_dct['x'] +#? int() str() list tuple +some_dct['unknown'] +k = 'a' +#? int() +some_dct[k] + +some_other_dct = dict(some_dct, c=set) +#? 
int() +some_other_dct['a'] +#? list +some_other_dct['x'] +#? set +some_other_dct['c'] diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/dynamic_params.py b/bundle/jedi-vim/pythonx/jedi/test/completion/dynamic_params.py new file mode 100644 index 000000000..1a48468a9 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/dynamic_params.py @@ -0,0 +1,159 @@ +""" +This is used for dynamic object completion. +Jedi tries to guess param types with a backtracking approach. +""" +def func(a, default_arg=2): + #? int() + default_arg + #? int() str() + return a + +#? int() +func(1) + +func + +int(1) + (int(2))+ func('') + +# Again the same function, but with another call. +def func(a): + #? float() + return a + +func(1.0) + +# Again the same function, but with no call. +def func(a): + #? + return a + +def func(a): + #? float() + return a +str(func(1.0)) + +# ----------------- +# *args, **args +# ----------------- +def arg(*args): + #? tuple() + args + #? int() + args[0] + +arg(1,"") +# ----------------- +# decorators +# ----------------- +def def_func(f): + def wrapper(*args, **kwargs): + return f(*args, **kwargs) + return wrapper + +@def_func +def func(c): + #? str() + return c + +#? str() +func("something") + +@def_func +def func(c=1): + #? float() + return c + +func(1.0) + +def tricky_decorator(func): + def wrapper(*args): + return func(1, *args) + + return wrapper + + +@tricky_decorator +def func(a, b): + #? int() + a + #? float() + b + +func(1.0) + +# Needs to be here, because in this case func is an import -> shouldn't lead to +# exceptions. +import sys as func +func.sys + +# ----------------- +# classes +# ----------------- + +class A(): + def __init__(self, a): + #? str() + a + +A("s") + +class A(): + def __init__(self, a): + #? int() + a + self.a = a + + def test(self, a): + #? float() + a + self.c = self.test2() + + def test2(self): + #? int() + return self.a + + def test3(self): + #? int() + self.test2() + #? 
int() + self.c + +A(3).test(2.0) +A(3).test2() + + +def from_class(x): + #? + x + +from UNDEFINED import from_class + +class Foo(from_class(1),): + pass + +# ----------------- +# comprehensions +# ----------------- + +def from_comprehension(foo): + #? int() float() + return foo + +[from_comprehension(1.0) for n in (1,)] +[from_comprehension(n) for n in (1,)] + +# ----------------- +# lambdas +# ----------------- + +#? int() +x_lambda = lambda x: x + +x_lambda(1) + +class X(): + #? str() + x_method = lambda self, a: a + + +X().x_method('') diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/flow_analysis.py b/bundle/jedi-vim/pythonx/jedi/test/completion/flow_analysis.py new file mode 100644 index 000000000..ed1f0533e --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/flow_analysis.py @@ -0,0 +1,310 @@ +# ----------------- +# First a few name resolution things +# ----------------- + +x = 3 +if NOT_DEFINED: + x = '' +#? 6 int() +elif x: + pass +else: + #? int() + x + +x = 1 +try: + x = '' +#? 8 int() str() +except x: + #? 5 int() str() + x + x = 1.0 +else: + #? 5 int() str() + x + x = list +finally: + #? 5 int() str() float() list + x + x = tuple + +if False: + with open("") as defined_in_false: + #? ['flush'] + defined_in_false.flu + +# ----------------- +# Return checks +# ----------------- + +def foo(x): + if 1.0: + return 1 + else: + return '' + +#? int() +foo(1) + + +# Exceptions are not analyzed. So check both if branches +def try_except(x): + try: + if 0: + return 1 + else: + return '' + except AttributeError: + return 1.0 + +#? float() str() +try_except(1) + + +# Exceptions are not analyzed. So check both if branches +def try_except(x): + try: + if 0: + return 1 + else: + return '' + except AttributeError: + return 1.0 + +#? float() str() +try_except(1) + +def test_function(): + a = int(input()) + if a % 2 == 0: + return True + return "False" + +#? 
bool() str() +test_function() + +# ----------------- +# elif +# ----------------- + +def elif_flows1(x): + if False: + return 1 + elif True: + return 1.0 + else: + return '' + +#? float() +elif_flows1(1) + + +def elif_flows2(x): + try: + if False: + return 1 + elif 0: + return 1.0 + else: + return '' + except ValueError: + return set + +#? str() set +elif_flows2(1) + + +def elif_flows3(x): + try: + if True: + return 1 + elif 0: + return 1.0 + else: + return '' + except ValueError: + return set + +#? int() set +elif_flows3(1) + +# ----------------- +# mid-difficulty if statements +# ----------------- +def check(a): + if a is None: + return 1 + return '' + return set + +#? int() +check(None) +#? str() +check('asb') + +a = list +if 2 == True: + a = set +elif 1 == True: + a = 0 + +#? int() +a +if check != 1: + a = '' +#? int() str() +a +if check == check: + a = list +#? list +a +if check != check: + a = set +else: + a = dict +#? dict +a +if not (check is not check): + a = 1 +#? int() +a + + +# ----------------- +# name resolution +# ----------------- + +a = list +def elif_name(x): + try: + if True: + a = 1 + elif 0: + a = 1.0 + else: + return '' + except ValueError: + a = x + return a + +#? int() set +elif_name(set) + +if 0: + a = '' +else: + a = int + +#? int +a + +# ----------------- +# isinstance +# ----------------- + +class A(): pass + +def isinst(x): + if isinstance(x, A): + return dict + elif isinstance(x, int) and x == 1 or x is True: + return set + elif isinstance(x, (float, reversed)): + return list + elif not isinstance(x, str): + return tuple + return 1 + +#? dict +isinst(A()) +#? set +isinst(True) +#? set +isinst(1) +#? tuple +isinst(2) +#? list +isinst(1.0) +#? tuple +isinst(False) +#? int() +isinst('') + +# ----------------- +# flows that are not reachable should be able to access parent scopes. +# ----------------- + +foobar = '' + +if 0: + within_flow = 1.0 + #? float() + within_flow + #? str() + foobar + if 0: + nested = 1 + #? int() + nested + #? 
float() + within_flow + #? str() + foobar + #? + nested + +if False: + in_false = 1 + #? ['in_false'] + in_false + +# ----------------- +# True objects like modules +# ----------------- + +class X(): + pass +if X: + a = 1 +else: + a = '' +#? int() +a + + +# ----------------- +# Recursion issues +# ----------------- + +def possible_recursion_error(filename): + if filename == 'a': + return filename + # It seems like without the brackets there wouldn't be a RecursionError. + elif type(filename) == str: + return filename + + +if NOT_DEFINED: + s = str() +else: + s = str() +#? str() +possible_recursion_error(s) + + +# ----------------- +# In combination with imports +# ----------------- + +from import_tree import flow_import + +if 1 == flow_import.env: + a = 1 +elif 2 == flow_import.env: + a = '' +elif 3 == flow_import.env: + a = 1.0 + +#? int() str() +a diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/fstring.py b/bundle/jedi-vim/pythonx/jedi/test/completion/fstring.py new file mode 100644 index 000000000..32f29e9bd --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/fstring.py @@ -0,0 +1,38 @@ +# python >= 3.6 + +class Foo: + bar = 1 + +#? 10 int() +f'{Foo.bar}' +#? 10 ['bar'] +f'{Foo.bar}' +#? 10 int() +Fr'{Foo.bar' +#? 10 ['bar'] +Fr'{Foo.bar' +#? int() +Fr'{Foo.bar +#? ['bar'] +Fr'{Foo.bar +#? ['Exception'] +F"{Excepti + +#? 8 Foo +Fr'a{Foo.bar' +#? str() +Fr'sasdf' + +#? 7 str() +Fr'''sasdf''' + '' + +#? ['upper'] +f'xyz'.uppe + + +#? 3 [] +f'f' + +# Github #1248 +#? int() +{"foo": 1}[f"foo"] diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/functions.py b/bundle/jedi-vim/pythonx/jedi/test/completion/functions.py new file mode 100644 index 000000000..9df5bfcfe --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/functions.py @@ -0,0 +1,498 @@ +def x(): + return + +#? None +x() + +def array(first_param): + #? ['first_param'] + first_param + return list() + +#? [] +array.first_param +#? [] +array.first_param. 
+func = array +#? [] +func.first_param + +#? list() +array() + +#? ['array'] +arr + + +def inputs(param): + return param + +#? list +inputs(list) + +def variable_middle(): + var = 3 + return var + +#? int() +variable_middle() + +def variable_rename(param): + var = param + return var + +#? int() +variable_rename(1) + +def multi_line_func(a, # comment blabla + + b): + return b + +#? str() +multi_line_func(1,'') + +def multi_line_call(b): + return b + + +multi_line_call( +#? int() + b=1) + + +# nothing after comma +def asdf(a): + return a + +x = asdf(a=1, + ) +#? int() +x + +# ----------------- +# double execution +# ----------------- +def double_exe(param): + return param + +#? str() +variable_rename(double_exe)("") + +# -> shouldn't work (and throw no error) +#? [] +variable_rename(list())(). +#? [] +variable_rename(1)(). + +# ----------------- +# recursions (should ignore) +# ----------------- +def recursion(a, b): + if a: + return b + else: + return recursion(a+".", b+1) + +# Does not also return int anymore, because we now support operators in simple cases. +#? float() +recursion("a", 1.0) + +def other(a): + return recursion2(a) + +def recursion2(a): + if random.choice([0, 1]): + return other(a) + else: + if random.choice([0, 1]): + return recursion2("") + else: + return a + +#? int() str() +recursion2(1) + +# ----------------- +# ordering +# ----------------- + +def a(): + #? int() + b() + return b() + +def b(): + return 1 + +#? int() +a() + +# ----------------- +# keyword arguments +# ----------------- + +def func(a=1, b=''): + return a, b + +exe = func(b=list, a=tuple) +#? tuple +exe[0] + +#? list +exe[1] + +# ----------------- +# default arguments +# ----------------- + +#? int() +func()[0] +#? str() +func()[1] +#? float() +func(1.0)[0] +#? str() +func(1.0)[1] + + +#? float() +func(a=1.0)[0] +#? str() +func(a=1.0)[1] +#? int() +func(b=1.0)[0] +#? float() +func(b=1.0)[1] +#? list +func(a=list, b=set)[0] +#? 
set +func(a=list, b=set)[1] + + +def func_default(a, b=1): + return a, b + + +def nested_default(**kwargs): + return func_default(**kwargs) + +#? float() +nested_default(a=1.0)[0] +#? int() +nested_default(a=1.0)[1] +#? str() +nested_default(a=1.0, b='')[1] + +# Defaults should only work if they are defined before - not after. +def default_function(a=default): + #? + return a + +#? +default_function() + +default = int() + +def default_function(a=default): + #? int() + return a + +#? int() +default_function() + +def default(a=default): + #? int() + a + +# ----------------- +# closures +# ----------------- +def a(): + l = 3 + def func_b(): + l = '' + #? str() + l + #? ['func_b'] + func_b + #? int() + l + +# ----------------- +# *args +# ----------------- + +def args_func(*args): + #? tuple() + return args + +exe = args_func(1, "") +#? int() +exe[0] +#? str() +exe[1] + +# illegal args (TypeError) +#? +args_func(*1)[0] +# iterator +#? int() +args_func(*iter([1]))[0] + +# different types +e = args_func(*[1 if UNDEFINED else "", {}]) +#? int() str() +e[0] +#? dict() +e[1] + +_list = [1,""] +exe2 = args_func(_list)[0] + +#? str() +exe2[1] + +exe3 = args_func([1,""])[0] + +#? str() +exe3[1] + +def args_func(arg1, *args): + return arg1, args + +exe = args_func(1, "", list) +#? int() +exe[0] +#? tuple() +exe[1] +#? list +exe[1][1] + + +# In a dynamic search, both inputs should be given. +def simple(a): + #? int() str() + return a +def xargs(*args): + return simple(*args) + +xargs(1) +xargs('') + + +# *args without a self symbol +def memoize(func): + def wrapper(*args, **kwargs): + return func(*args, **kwargs) + return wrapper + + +class Something(): + @memoize + def x(self, a, b=1): + return a + +#? int() +Something().x(1) + + +# ----------------- +# ** kwargs +# ----------------- +def kwargs_func(**kwargs): + #? ['keys'] + kwargs.keys + #? dict() + return kwargs + +exe = kwargs_func(a=3,b=4.0) +#? dict() +exe +#? int() +exe['a'] +#? float() +exe['b'] +#? 
int() float() +exe['c'] + +a = 'a' +exe2 = kwargs_func(**{a:3, + 'b':4.0}) + +#? int() +exe2['a'] +#? float() +exe2['b'] +#? int() float() +exe2['c'] + +exe3 = kwargs_func(**{k: v for k, v in [(a, 3), ('b', 4.0)]}) + +# Should resolve to the same as 2 but jedi is not smart enough yet +# Here to make sure it doesn't result in crash though +#? +exe3['a'] + +#? +exe3['b'] + +#? +exe3['c'] + +# ----------------- +# *args / ** kwargs +# ----------------- + +def func_without_call(*args, **kwargs): + #? tuple() + args + #? dict() + kwargs + +def fu(a=1, b="", *args, **kwargs): + return a, b, args, kwargs + +exe = fu(list, 1, "", c=set, d="") + +#? list +exe[0] +#? int() +exe[1] +#? tuple() +exe[2] +#? str() +exe[2][0] +#? dict() +exe[3] +#? set +exe[3]['c'] + + +def kwargs_iteration(**kwargs): + return kwargs + +for x in kwargs_iteration(d=3): + #? float() + {'d': 1.0, 'c': '1'}[x] + + +# ----------------- +# nested *args +# ----------------- +def function_args(a, b, c): + return b + +def nested_args(*args): + return function_args(*args) + +def nested_args2(*args, **kwargs): + return nested_args(*args) + +#? int() +nested_args('', 1, 1.0, list) +#? [] +nested_args(''). + +#? int() +nested_args2('', 1, 1.0) +#? [] +nested_args2(''). + +# ----------------- +# nested **kwargs +# ----------------- +def nested_kw(**kwargs1): + return function_args(**kwargs1) + +def nested_kw2(**kwargs2): + return nested_kw(**kwargs2) + +# invalid command, doesn't need to return anything +#? +nested_kw(b=1, c=1.0, list) +#? int() +nested_kw(b=1) +# invalid command, doesn't need to return anything +#? +nested_kw(d=1.0, b=1, list) +#? int() +nested_kw(a=3.0, b=1) +#? int() +nested_kw(b=1, a=r"") +#? [] +nested_kw(1, ''). +#? [] +nested_kw(a=''). + +#? int() +nested_kw2(b=1) +#? int() +nested_kw2(b=1, c=1.0) +#? int() +nested_kw2(c=1.0, b=1) +#? [] +nested_kw2(''). +#? [] +nested_kw2(a=''). +#? [] +nested_kw2('', b=1). 
+ +# ----------------- +# nested *args/**kwargs +# ----------------- +def nested_both(*args, **kwargs): + return function_args(*args, **kwargs) + +def nested_both2(*args, **kwargs): + return nested_both(*args, **kwargs) + +# invalid commands, may return whatever. +#? list +nested_both('', b=1, c=1.0, list) +#? list +nested_both('', c=1.0, b=1, list) + +#? [] +nested_both(''). + +#? int() +nested_both2('', b=1, c=1.0) +#? int() +nested_both2('', c=1.0, b=1) +#? [] +nested_both2(''). + +# ----------------- +# nested *args/**kwargs with a default arg +# ----------------- +def function_def(a, b, c): + return a, b + +def nested_def(a, *args, **kwargs): + return function_def(a, *args, **kwargs) + +def nested_def2(*args, **kwargs): + return nested_def(*args, **kwargs) + +#? str() +nested_def2('', 1, 1.0)[0] +#? str() +nested_def2('', b=1, c=1.0)[0] +#? str() +nested_def2('', c=1.0, b=1)[0] +#? int() +nested_def2('', 1, 1.0)[1] +#? int() +nested_def2('', b=1, c=1.0)[1] +#? int() +nested_def2('', c=1.0, b=1)[1] +#? [] +nested_def2('')[1]. + +# ----------------- +# magic methods +# ----------------- +def a(): pass +#? ['__closure__'] +a.__closure__ diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/generators.py b/bundle/jedi-vim/pythonx/jedi/test/completion/generators.py new file mode 100644 index 000000000..3b04d9321 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/generators.py @@ -0,0 +1,316 @@ +# ----------------- +# yield statement +# ----------------- +def gen(): + if random.choice([0, 1]): + yield 1 + else: + yield "" + +gen_exe = gen() +#? int() str() +next(gen_exe) + +#? int() str() list +next(gen_exe, list) + + +def gen_ret(value): + yield value + +#? int() +next(gen_ret(1)) + +#? [] +next(gen_ret()). + +# generators infer to true if cast by bool. +a = '' +if gen_ret(): + a = 3 +#? 
int() +a + + +# ----------------- +# generators should not be indexable +# ----------------- +def get(param): + if random.choice([0, 1]): + yield 1 + else: + yield "" + +#? [] +get()[0]. + +# ----------------- +# __iter__ +# ----------------- +for a in get(): + #? int() str() + a + + +class Get(): + def __iter__(self): + if random.choice([0, 1]): + yield 1 + else: + yield "" + +b = [] +for a in Get(): + #? int() str() + a + b += [a] + +#? list() +b +#? int() str() +b[0] + +g = iter(Get()) +#? int() str() +next(g) + +g = iter([1.0]) +#? float() +next(g) + +x, y = Get() +#? int() str() +x +#? int() str() +x + +class Iter: + def __iter__(self): + yield "" + i = 0 + while True: + v = 1 + yield v + i += 1 +a, b, c = Iter() +#? str() int() +a +#? str() int() +b +#? str() int() +c + + +# ----------------- +# __next__ +# ----------------- +class Counter: + def __init__(self, low, high): + self.current = low + self.high = high + + def __iter__(self): + return self + + def next(self): + """ need to have both __next__ and next, because of py2/3 testing """ + return self.__next__() + + def __next__(self): + if self.current > self.high: + raise StopIteration + else: + self.current += 1 + return self.current - 1 + + +for c in Counter(3, 8): + #? int() + print c + + +# ----------------- +# tuple assignments +# ----------------- +def gen(): + if random.choice([0,1]): + yield 1, "" + else: + yield 2, 1.0 + + +a, b = next(gen()) +#? int() +a +#? str() float() +b + + +def simple(): + if random.choice([0, 1]): + yield 1 + else: + yield "" + +a, b = simple() +#? int() str() +a +# For now this is ok. +#? int() str() +b + + +def simple2(): + yield 1 + yield "" + +a, b = simple2() +#? int() +a +#? str() +b + +a, = (a for a in [1]) +#? int() +a + +# ----------------- +# More complicated access +# ----------------- + +# `close` is a method wrapper. +#? ['__call__'] +gen().close.__call__ + +#? +gen().throw() + +#? ['co_consts'] +gen().gi_code.co_consts + +#? 
[] +gen.gi_code.co_consts + +# `send` is also a method wrapper. +#? ['__call__'] +gen().send.__call__ + +#? tuple() +gen().send() + +#? +gen()() + +# ----------------- +# empty yield +# ----------------- + +def x(): + yield + +#? None +next(x()) +#? gen() +x() + +def x(): + for i in range(3): + yield + +#? None +next(x()) + +# ----------------- +# yield in expression +# ----------------- + +def x(): + a= [(yield 1)] + +#? int() +next(x()) + +# ----------------- +# statements +# ----------------- +def x(): + foo = yield + #? + foo + +# ----------------- +# yield from +# ----------------- + +def yield_from(): + yield from iter([1]) + +#? int() +next(yield_from()) + +def yield_from_multiple(): + yield from iter([1]) + yield str() + return 2.0 + +x, y = yield_from_multiple() +#? int() +x +#? str() +y + +def test_nested(): + x = yield from yield_from_multiple() + #? float() + x + yield x + +x, y, z = test_nested() +#? int() +x +#? str() +y +# For whatever reason this is currently empty +#? float() +z + + +def test_in_brackets(): + x = 1 + (yield from yield_from_multiple()) + #? float() + x + + generator = (1 for 1 in [1]) + x = yield from generator + #? None + x + x = yield from 1 + #? + x + x = yield from [1] + #? None + x + + +# ----------------- +# Annotations +# ----------------- + +from typing import Iterator + +def annotation1() -> float: + yield 1 + +def annotation2() -> Iterator[float]: + yield 1 + + +#? +next(annotation1()) +#? float() +next(annotation2()) + + +# annotations should override generator inference +#? float() +annotation1() diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/goto.py b/bundle/jedi-vim/pythonx/jedi/test/completion/goto.py new file mode 100644 index 000000000..9a05c1102 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/goto.py @@ -0,0 +1,258 @@ +# goto command tests are different in syntax + +definition = 3 +#! 0 ['a = definition'] +a = definition + +#! [] +b +#! ['a = definition'] +a + +b = a +c = b +#! 
['c = b'] +c + +cd = 1 +#! 1 ['cd = c'] +cd = c +#! 0 ['cd = e'] +cd = e + +#! ['module math'] +import math +#! ['module math'] +math + +#! ['module math'] +b = math +#! ['b = math'] +b + +#! 18 ['foo = 10'] +foo = 10;print(foo) + +# ----------------- +# classes +# ----------------- +class C(object): + x = 3 + def b(self): + #! ['b = math'] + b + #! ['def b'] + self.b + #! 14 ['def b'] + self.b() + #! 14 ['def b'] + self.b. + #! 11 ['param self'] + self.b + #! ['x = 3'] + self.x + #! 14 ['x = 3'] + self.x. + return 1 + + #! ['def b'] + b + +#! ['b = math'] +b + +#! ['def b'] +C.b +#! ['def b'] +C().b +#! 0 ['class C'] +C().b +#! 0 ['class C'] +C().b + +D = C +#! ['def b'] +D.b +#! ['def b'] +D().b + +#! 0 ['D = C'] +D().b +#! 0 ['D = C'] +D().b + +def c(): + return '' + +#! ['def c'] +c +#! 0 ['def c'] +c() + + +class ClassVar(): + x = 3 + +#! ['x = 3'] +ClassVar.x +#! ['x = 3'] +ClassVar().x + +# before assignments +#! 10 ['x = 3'] +ClassVar.x = '' +#! 12 ['x = 3'] +ClassVar().x = '' + +# Recurring use of the same var name, github #315 +def f(t=None): + #! 9 ['param t=None'] + t = t or 1 + + +class X(): + pass + +#! 3 [] +X(foo=x) + + +# Multiple inheritance +class Foo: + def foo(self): + print("foo") +class Bar: + def bar(self): + print("bar") +class Baz(Foo, Bar): + def baz(self): + #! ['def foo'] + super().foo + #! ['def bar'] + super().bar + #! ['instance Foo'] + super() + +# ----------------- +# imports +# ----------------- + +#! ['module import_tree'] +import import_tree +#! ["a = ''"] +import_tree.a + +#! ['module mod1'] +import import_tree.mod1 +#! ['module mod1'] +from import_tree.mod1 +#! ['a = 1'] +import_tree.mod1.a + +#! ['module pkg'] +import import_tree.pkg +#! ['a = list'] +import_tree.pkg.a + +#! ['module mod1'] +import import_tree.pkg.mod1 +#! ['a = 1.0'] +import_tree.pkg.mod1.a +#! ["a = ''"] +import_tree.a + +#! ['module mod1'] +from import_tree.pkg import mod1 +#! ['a = 1.0'] +mod1.a + +#! ['module mod1'] +from import_tree import mod1 +#! 
['a = 1'] +mod1.a + +#! ['a = 1.0'] +from import_tree.pkg.mod1 import a + +#! ['module os'] +from .imports import os + +#! ['some_variable = 1'] +from . import some_variable + +# ----------------- +# anonymous classes +# ----------------- +def func(): + class A(): + def b(self): + return 1 + return A() + +#! 8 ['def b'] +func().b() + +# ----------------- +# on itself +# ----------------- + +#! 7 ['class ClassDef'] +class ClassDef(): + """ abc """ + pass + +# ----------------- +# params +# ----------------- + +param = ClassDef +#! 8 ['param param'] +def ab1(param): pass +#! 9 ['param param'] +def ab2(param): pass +#! 11 ['param = ClassDef'] +def ab3(a=param): pass + +ab1(ClassDef);ab2(ClassDef);ab3(ClassDef) + +# ----------------- +# for loops +# ----------------- + +for i in range(1): + #! ['for i in range(1): i'] + i + +for key, value in [(1,2)]: + #! ['for key, value in [(1,2)]: key'] + key + +#! 4 ['for y in [1]: y'] +for y in [1]: + #! ['for y in [1]: y'] + y + +# ----------------- +# decorator +# ----------------- +def dec(dec_param=3): + pass + +#! 8 ['param dec_param=3'] +@dec(dec_param=5) +def y(): + pass + +class ClassDec(): + def class_func(func): + return func + +#! 14 ['def class_func'] +@ClassDec.class_func +def x(): + pass + +#! 2 ['class ClassDec'] +@ClassDec.class_func +def z(): + pass diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/__init__.py new file mode 100644 index 000000000..5cbbcd7d1 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/__init__.py @@ -0,0 +1,7 @@ +a = '' + +from . 
import invisible_pkg + +the_pkg = invisible_pkg + +invisible_pkg = 1 diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/classes.py b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/classes.py new file mode 100644 index 000000000..23b088c31 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/classes.py @@ -0,0 +1,10 @@ +blub = 1 + +class Config2(): + pass + + +class BaseClass(): + mode = Config2() + if isinstance(whaat, int): + mode2 = whaat diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/flow_import.py b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/flow_import.py new file mode 100644 index 000000000..a0a779eca --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/flow_import.py @@ -0,0 +1,4 @@ +if name: + env = 1 +else: + env = 2 diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/globals.py b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/globals.py new file mode 100644 index 000000000..492f652a5 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/globals.py @@ -0,0 +1,5 @@ + + +def something(): + global foo + foo = 3 diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/invisible_pkg.py b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/invisible_pkg.py new file mode 100644 index 000000000..9c78ce2fa --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/invisible_pkg.py @@ -0,0 +1,7 @@ +""" +It should not be possible to import this pkg except for the import_tree itself, +because it is overwritten there. (It would be possible with a sys.path +modification, though). 
+""" + +foo = 1.0 diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/mod1.py b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/mod1.py new file mode 100644 index 000000000..bd696d642 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/mod1.py @@ -0,0 +1,4 @@ +a = 1 +from import_tree.random import a as c + +foobarbaz = 3.0 diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/mod2.py b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/mod2.py new file mode 100644 index 000000000..19914f585 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/mod2.py @@ -0,0 +1 @@ +from . import mod1 as fake diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/pkg/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/pkg/__init__.py new file mode 100644 index 000000000..480f22213 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/pkg/__init__.py @@ -0,0 +1,3 @@ +a = list + +from math import * diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/pkg/base.py b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/pkg/base.py new file mode 100644 index 000000000..e3de9b5a0 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/pkg/base.py @@ -0,0 +1,3 @@ +class MyBase: + def f1(self): + pass diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/pkg/mod1.py b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/pkg/mod1.py new file mode 100644 index 000000000..fe1d27faa --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/pkg/mod1.py @@ -0,0 +1,3 @@ +a = 1.0 + +from ..random import foobar diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/random.py b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/random.py new file mode 100644 index 000000000..7a34c4e39 --- /dev/null +++ 
b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/random.py @@ -0,0 +1,6 @@ +""" +Here because random is also a builtin module. +""" +a = set + +foobar = 0 diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/recurse_class1.py b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/recurse_class1.py new file mode 100644 index 000000000..cd1dd6a0f --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/recurse_class1.py @@ -0,0 +1,5 @@ +from . import recurse_class2 + +class C(recurse_class2.C): + def a(self): + pass diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/recurse_class2.py b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/recurse_class2.py new file mode 100644 index 000000000..c0a7df68d --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/recurse_class2.py @@ -0,0 +1,4 @@ +from . import recurse_class1 + +class C(recurse_class1.C): + pass diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/references.py b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/references.py new file mode 100644 index 000000000..dd3c9838f --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/references.py @@ -0,0 +1,5 @@ +from ..usages import usage_definition + + +def x(): + usage_definition() diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/rename1.py b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/rename1.py new file mode 100644 index 000000000..bdc33159b --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/rename1.py @@ -0,0 +1,3 @@ +""" used for renaming tests """ + +abc = 3 diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/rename2.py b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/rename2.py new file mode 100644 index 000000000..3d1a12ee7 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/import_tree/rename2.py @@ -0,0 +1,6 @@ 
+""" used for renaming tests """ + + +from import_tree.rename1 import abc + +abc diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/imports.py b/bundle/jedi-vim/pythonx/jedi/test/completion/imports.py new file mode 100644 index 000000000..bd1bf75ba --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/imports.py @@ -0,0 +1,325 @@ +# ----------------- +# own structure +# ----------------- + +# do separate scopes +def scope_basic(): + from import_tree import mod1 + + #? int() + mod1.a + + #? [] + import_tree.a + + #? [] + import_tree.mod1 + + import import_tree + #? str() + import_tree.a + + +def scope_pkg(): + import import_tree.mod1 + + #? str() + import_tree.a + + #? ['mod1'] + import_tree.mod1 + + #? int() + import_tree.mod1.a + +def scope_nested(): + import import_tree.pkg.mod1 + + #? str() + import_tree.a + + #? list + import_tree.pkg.a + + #? ['sqrt'] + import_tree.pkg.sqrt + + #? ['pkg'] + import_tree.p + + #? float() + import_tree.pkg.mod1.a + #? ['a', 'foobar', '__name__', '__package__', '__file__', '__doc__'] + a = import_tree.pkg.mod1. + + import import_tree.random + #? set + import_tree.random.a + +def scope_nested2(): + """Multiple modules should be indexable, if imported""" + import import_tree.mod1 + import import_tree.pkg + #? ['mod1'] + import_tree.mod1 + #? ['pkg'] + import_tree.pkg + + # With the latest changes this completion also works, because submodules + # are always included (some nested import structures lead to this, + # typically). + #? ['rename1'] + import_tree.rename1 + +def scope_from_import_variable(): + """ + All of them shouldn't work, because "fake" imports don't work in python + without the use of ``sys.modules`` modifications (e.g. ``os.path`` see also + github issue #213 for clarification. + """ + a = 3 + #? + from import_tree.mod2.fake import a + #? + from import_tree.mod2.fake import c + + #? + a + #? 
+ c + +def scope_from_import_variable_with_parenthesis(): + from import_tree.mod2.fake import ( + a, foobarbaz + ) + + #? + a + #? + foobarbaz + # shouldn't complete, should still list the name though. + #? ['foobarbaz'] + foobarbaz + + +def as_imports(): + from import_tree.mod1 import a as xyz + #? int() + xyz + import not_existant, import_tree.mod1 as foo + #? int() + foo.a + import import_tree.mod1 as bar + #? int() + bar.a + + +def broken_import(): + import import_tree.mod1 + #? import_tree.mod1 + from import_tree.mod1 + + #? 25 import_tree.mod1 + import import_tree.mod1. + #? 25 import_tree.mod1 + impo5t import_tree.mod1.foo + #? 25 import_tree.mod1 + import import_tree.mod1.foo. + #? 31 import_tree.mod1 + import json, import_tree.mod1.foo. + + # Cases with ; + mod1 = 3 + #? 25 int() + import import_tree; mod1. + #? 38 import_tree.mod1 + import_tree; import import_tree.mod1. + + #! ['module json'] + from json + + +def test_import_priorities(): + """ + It's possible to overwrite import paths in an ``__init__.py`` file, by + just assigining something there. + + See also #536. + """ + from import_tree import the_pkg, invisible_pkg + #? int() + invisible_pkg + # In real Python, this would be the module, but it's not, because Jedi + # doesn't care about most stateful issues such as __dict__, which it would + # need to, to do this in a correct way. + #? int() + the_pkg + # Importing foo is still possible, even though inivisible_pkg got changed. + #? float() + from import_tree.invisible_pkg import foo + + +# ----------------- +# std lib modules +# ----------------- +import tokenize +#? ['tok_name'] +tokenize.tok_name + +from pyclbr import * + +#? ['readmodule_ex'] +readmodule_ex +import os + +#? ['dirname'] +os.path.dirname + +from os.path import ( + expanduser +) + +#? os.path.expanduser +expanduser + +from itertools import (tee, + islice) +#? ['islice'] +islice + +from functools import (partial, wraps) +#? 
['wraps'] +wraps + +from keyword import kwlist, \ + iskeyword +#? ['kwlist'] +kwlist + +#? [] +from keyword import not_existing1, not_existing2 + +from tokenize import io +tokenize.generate_tokens + +import socket +#? 14 ['SocketIO'] +socket.SocketIO + +# ----------------- +# builtins +# ----------------- + +import sys +#? ['prefix'] +sys.prefix + +#? ['append'] +sys.path.append + +from math import * +#? ['cos', 'cosh'] +cos + +def func_with_import(): + import time + return time + +#? ['sleep'] +func_with_import().sleep + +# ----------------- +# relative imports +# ----------------- + +from .import_tree import mod1 +#? int() +mod1.a + +from ..import_tree import mod1 +#? +mod1.a + +from .......import_tree import mod1 +#? +mod1.a + +from .. import helpers +#? int() +helpers.sample_int + +from ..helpers import sample_int as f +#? int() +f + +from . import run +#? [] +run. + +from . import import_tree as imp_tree +#? str() +imp_tree.a + +from . import datetime as mod1 +#? [] +mod1. + +# self import +# this can cause recursions +from imports import * + +# ----------------- +# packages +# ----------------- + +from import_tree.mod1 import c +#? set +c + +from import_tree import recurse_class1 + +#? ['a'] +recurse_class1.C.a +# github #239 RecursionError +#? ['a'] +recurse_class1.C().a + +# ----------------- +# Jedi debugging +# ----------------- + +# memoizing issues (check git history for the fix) +import not_existing_import + +if not_existing_import: + a = not_existing_import +else: + a = not_existing_import +#? +a + +# ----------------- +# module underscore descriptors +# ----------------- + +def underscore(): + import keyword + #? ['__file__'] + keyword.__file__ + #? str() + keyword.__file__ + + # Does that also work for our own module? + #? ['__file__'] + __file__ + + +# ----------------- +# complex relative imports #784 +# ----------------- +def relative(): + #? ['foobar'] + from import_tree.pkg.mod1 import foobar + #? 
int() + foobar + return 1 diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/inheritance.py b/bundle/jedi-vim/pythonx/jedi/test/completion/inheritance.py new file mode 100644 index 000000000..f7619a044 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/inheritance.py @@ -0,0 +1,76 @@ + +class Super(object): + attribute = 3 + + def func(self): + return 1 + + class Inner(): + pass + + +class Sub(Super): + #? 13 Sub.attribute + def attribute(self): + pass + + #! 8 ['attribute = 3'] + def attribute(self): + pass + + #! 4 ['def func'] + func = 3 + #! 12 ['def func'] + class func(): pass + + #! 8 ['class Inner'] + def Inner(self): pass + +# ----------------- +# Finding self +# ----------------- + +class Test1: + class Test2: + def __init__(self): + self.foo_nested = 0 + #? ['foo_nested'] + self.foo_ + #? + self.foo_here + + def __init__(self, self2): + self.foo_here = 3 + #? ['foo_here', 'foo_in_func'] + self.foo_ + #? int() + self.foo_here + #? + self.foo_nested + #? + self.foo_not_on_self + #? float() + self.foo_in_func + self2.foo_on_second = '' + + def closure(): + self.foo_in_func = 4. + + def bar(self): + self = 3 + self.foo_not_on_self = 3 + + +class SubTest(Test1): + def __init__(self): + self.foo_sub_class = list + + def bar(self): + #? ['foo_here', 'foo_in_func', 'foo_sub_class'] + self.foo_ + #? int() + self.foo_here + #? + self.foo_nested + #? + self.foo_not_on_self diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/invalid.py b/bundle/jedi-vim/pythonx/jedi/test/completion/invalid.py new file mode 100644 index 000000000..0a76a80eb --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/invalid.py @@ -0,0 +1,214 @@ +""" +This file is less about the results and much more about the fact, that no +exception should be thrown. + +Basically this file could change depending on the current implementation. But +there should never be any errors. +""" + +# wait until keywords are out of definitions (pydoc function). +#? 5 +'s'() + +#? 
[] +str()).upper + +# ----------------- +# funcs +# ----------------- +def asdf(a or b): # multiple param names + return a + +#? +asdf(2) + +asdf = '' + +from a import (b +def blub(): + return 0 +def wrong_indents(): + asdf = 3 + asdf + asdf( + # TODO this seems to be wrong now? + #? int() + asdf +def openbrace(): + asdf = 3 + asdf( + #? int() + asdf + return 1 + +#? int() +openbrace() + +blub([ +#? int() +openbrace() + +def indentfault(): + asd( + indentback + +#? [] +indentfault(). + +def openbrace2(): + asd( +def normalfunc(): + return 1 + +#? int() +normalfunc() + +# dots in param +def f(seq1...=None): + return seq1 +#? +f(1) + +@ +def test_empty_decorator(): + return 1 + +#? int() +test_empty_decorator() + +def invalid_param(param=): + #? + param +# ----------------- +# flows +# ----------------- + +# first part not complete (raised errors) +if a + a +else: + #? ['AttributeError'] + AttributeError + +try +#? ['AttributeError'] +except AttributeError + pass +finally: + pass + +#? ['isinstance'] +if isi +try: + except TypeError: + #? str() + str() + +def break(): pass +# wrong ternary expression +a = '' +a = 1 if +#? str() +a + +# No completions for for loops without the right syntax +for for_local in : + for_local +#? [] +for_local +#? +for_local + + +# ----------------- +# list comprehensions +# ----------------- + +a2 = [for a2 in [0]] +#? +a2[0] + +a3 = [for xyz in] +#? +a3[0] + +a3 = [a4 for in 'b'] +#? +a3[0] + +a3 = [a4 for a in for x in y] +#? +a3[0] + +a = [for a in +def break(): pass + +#? str() +a[0] + +a = [a for a in [1,2] +def break(): pass +#? str() +a[0] + +#? [] +int()).real + +# ----------------- +# keywords +# ----------------- + +#! [] +as + +def empty_assert(): + x = 3 + assert + #? int() + x + +import datetime as + + +# ----------------- +# statements +# ----------------- + +call = '' +invalid = .call +#? +invalid + +invalid = call?.call +#? str() +invalid + +# comma +invalid = ,call +#? 
str() +invalid + + +# ----------------- +# classes +# ----------------- + +class BrokenPartsOfClass(): + def foo(self): + # This construct contains two places where Jedi with Python 3 can fail. + # It should just ignore those constructs and still execute `bar`. + pass + if 2: + try: + pass + except ValueError, e: + raise TypeError, e + else: + pass + + def bar(self): + self.x = 3 + return '' + +#? str() +BrokenPartsOfClass().bar() diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/isinstance.py b/bundle/jedi-vim/pythonx/jedi/test/completion/isinstance.py new file mode 100644 index 000000000..3eebfe277 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/isinstance.py @@ -0,0 +1,112 @@ +if isinstance(i, str): + #? str() + i + +if isinstance(j, (str, int)): + #? str() int() + j + +while isinstance(k, (str, int)): + #? str() int() + k + +if not isinstance(k, (str, int)): + #? + k + +while not isinstance(k, (str, int)): + #? + k + +assert isinstance(ass, int) +#? int() +ass + +assert isinstance(ass, str) +assert not isinstance(ass, int) + +if 2: + #? str() + ass + +# ----------------- +# invalid arguments +# ----------------- + +if isinstance(wrong, str()): + #? + wrong + +# ----------------- +# in functions +# ----------------- + +import datetime + + +def fooooo(obj): + if isinstance(obj, datetime.datetime): + #? datetime.datetime() + obj + + +def fooooo2(obj): + if isinstance(obj, datetime.date): + return obj + else: + return 1 + +a +# In earlier versions of Jedi, this returned both datetime and int, but now +# Jedi does flow checks and realizes that the top return isn't executed. +#? int() +fooooo2('') + + +def isinstance_func(arr): + for value in arr: + if isinstance(value, dict): + # Shouldn't fail, even with the dot. + #? 17 dict() + value. + elif isinstance(value, int): + x = value + #? int() + x + +# ----------------- +# Names with multiple indices. 
+# ----------------- + +class Test(): + def __init__(self, testing): + if isinstance(testing, str): + self.testing = testing + else: + self.testing = 10 + + def boo(self): + if isinstance(self.testing, str): + # TODO this is wrong, it should only be str. + #? str() int() + self.testing + #? Test() + self + +# ----------------- +# Syntax +# ----------------- + +#? +isinstance(1, int()) + +# ----------------- +# more complicated arguments +# ----------------- + +def ayyyyyye(obj): + if isinstance(obj.obj, str): + #? + obj.obj + #? + obj diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/keywords.py b/bundle/jedi-vim/pythonx/jedi/test/completion/keywords.py new file mode 100644 index 000000000..fa9bf52dd --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/keywords.py @@ -0,0 +1,59 @@ + +#? ['raise'] +raise + +#? ['Exception'] +except + +#? [] +b + continu + +#? [] +b + continue + +#? ['continue'] +b; continue + +#? ['continue'] +b; continu + +#? [] +c + pass + +#? [] +a + pass + +#? ['pass'] +b; pass + +# ----------------- +# Keywords should not appear everywhere. +# ----------------- + +#? [] +with open() as f +#? [] +def i +#? [] +class i + +#? [] +continue i + +# More syntax details, e.g. while only after newline, but not after semicolon, +# continue also after semicolon +#? ['while'] +while +#? [] +x while +#? [] +x; while +#? ['continue'] +x; continue + +#? [] +and +#? ['and'] +x and +#? [] +x * and diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/lambdas.py b/bundle/jedi-vim/pythonx/jedi/test/completion/lambdas.py new file mode 100644 index 000000000..524df0cec --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/lambdas.py @@ -0,0 +1,113 @@ +# ----------------- +# lambdas +# ----------------- +a = lambda: 3 +#? int() +a() + +x = [] +a = lambda x: x +#? int() +a(0) + +#? float() +(lambda x: x)(3.0) + +arg_l = lambda x, y: y, x +#? float() +arg_l[0]('', 1.0) +#? 
list() +arg_l[1] + +arg_l = lambda x, y: (y, x) +args = 1,"" +result = arg_l(*args) +#? tuple() +result +#? str() +result[0] +#? int() +result[1] + +def with_lambda(callable_lambda, *args, **kwargs): + return callable_lambda(1, *args, **kwargs) + +#? int() +with_lambda(arg_l, 1.0)[1] +#? float() +with_lambda(arg_l, 1.0)[0] +#? float() +with_lambda(arg_l, y=1.0)[0] +#? int() +with_lambda(lambda x: x) +#? float() +with_lambda(lambda x, y: y, y=1.0) + +arg_func = lambda *args, **kwargs: (args[0], kwargs['a']) +#? int() +arg_func(1, 2, a='', b=10)[0] +#? list() +arg_func(1, 2, a=[], b=10)[1] + +# magic method +a = lambda: 3 +#? ['__closure__'] +a.__closure__ + +class C(): + def __init__(self, foo=1.0): + self.a = lambda: 1 + self.foo = foo + + def ret(self): + return lambda: self.foo + + def with_param(self): + return lambda x: x + self.a() + + lambd = lambda self: self.foo + +#? int() +C().a() + +#? str() +C('foo').ret()() + +index = C().with_param()(1) +#? float() +['', 1, 1.0][index] + +#? float() +C().lambd() +#? int() +C(1).lambd() + + +def xy(param): + def ret(a, b): + return a + b + + return lambda b: ret(param, b) + +#? int() +xy(1)(2) + +# ----------------- +# lambda param (#379) +# ----------------- +class Test(object): + def __init__(self, pred=lambda a, b: a): + self.a = 1 + #? int() + self.a + #? float() + pred(1.0, 2) + +# ----------------- +# test_nocond in grammar (happens in list comprehensions with `if`) +# ----------------- +# Doesn't need to do anything yet. It should just not raise an error. These +# nocond lambdas make no sense at all. + +#? 
int() +[a for a in [1,2] if lambda: 3][0] diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/named_expression.py b/bundle/jedi-vim/pythonx/jedi/test/completion/named_expression.py new file mode 100644 index 000000000..11293b68e --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/named_expression.py @@ -0,0 +1,52 @@ +# For assignment expressions / named expressions / walrus operators / whatever +# they are called. + +# python >= 3.8 +b = (a:=1, a) + +#? int() +b[0] +#? +b[1] + +# Should not fail +b = ('':=1,) + +#? int() +b[0] + +def test_assignments(): + match = '' + #? str() + match + #? 8 int() + if match := 1: + #? int() + match + #? int() + match + +def test_assignments2(): + class Foo: + match = '' + #? str() + Foo.match + #? 13 int() + if Foo.match := 1: + #? str() + Foo.match + #? str() + Foo.match + + #? + y + #? 16 str() + if y := Foo.match: + #? str() + y + #? str() + y + + #? 8 str() + if z := Foo.match: + pass diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/named_param.py b/bundle/jedi-vim/pythonx/jedi/test/completion/named_param.py new file mode 100644 index 000000000..de8073e9d --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/named_param.py @@ -0,0 +1,143 @@ +""" +Named Params: +>>> def a(abc): pass +... +>>> a(abc=3) # <- this stuff (abc) +""" + +def a(abc): + pass + +#? 5 ['abc='] +a(abc) + + +def a(*some_args, **some_kwargs): + pass + +#? 11 [] +a(some_args) + +#? 13 [] +a(some_kwargs) + +def multiple(foo, bar): + pass + +#? 17 ['bar='] +multiple(foo, bar) + +#? ['bar='] +multiple(foo, bar + +my_lambda = lambda lambda_param: lambda_param + 1 +#? 22 ['lambda_param='] +my_lambda(lambda_param) + +# __call__ / __init__ +class Test(object): + def __init__(self, hello_other): + pass + + def __call__(self, hello): + pass + + def test(self, blub): + pass + +#? 10 ['hello_other='] +Test(hello=) +#? 12 ['hello='] +Test()(hello=) +#? 11 [] +Test()(self=) +#? 16 [] +Test().test(self=) +#? 
16 ['blub='] +Test().test(blub=) + +# builtins + +#? 12 [] +any(iterable=) + + +def foo(xyz): + pass + +#? 7 ['xyz='] +@foo(xy) +def x(): pass + +#? 7 ['xyz='] +foo(xyz) +# No completion should be possible if it's not a simple name +#? 17 [] +x = " "; foo(x.xyz) +#? 17 [] +x = " "; foo([xyz) +#? 20 [] +x = " "; foo(z[f,xyz) +#? 18 [] +x = " "; foo(z[xyz) +#? 20 [] +x = " "; foo(xyz[xyz) +#? 20 [] +x = " "; foo(xyz[(xyz) + +#? 8 ['xyz='] +@foo(xyz) +def x(): pass + +@str +#? 8 ['xyz='] +@foo(xyz) +def x(): pass + +# ----------------- +# Only keyword arguments are valid +# ----------------- + +def x(bam, *, bar, baz): + pass +def y(bam, *bal, bar, baz, **bag): + pass +def z(bam, bar=2, *, bas=1): + pass + +#? 7 ['bar=', 'baz='] +x(1, ba) + +#? 14 ['baz='] +x(1, bar=2, ba) +#? 7 ['bar=', 'baz='] +x(1, ba, baz=3) +#? 14 ['baz='] +x(1, bar=2, baz=3) +#? 7 ['BaseException'] +x(basee) +#? 22 ['bar=', 'baz='] +x(1, 2, 3, 4, 5, 6, bar=2) + +#? 14 ['baz='] +y(1, bar=2, ba) +#? 7 ['bar=', 'BaseException', 'baz='] +y(1, ba, baz=3) +#? 14 ['baz='] +y(1, bar=2, baz=3) +#? 7 ['BaseException'] +y(basee) +#? 22 ['bar=', 'BaseException', 'baz='] +y(1, 2, 3, 4, 5, 6, bar=2) + +#? 11 ['bar=', 'bas='] +z(bam=1, bar=2, bas=3) +#? 8 ['BaseException', 'bas='] +z(1, bas=2) +#? 12 ['BaseException'] +z(1, bas=bas) + +#? 19 ['dict'] +z(1, bas=bas, **dic) +#? 
18 ['dict'] +z(1, bas=bas, *dic) diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/namespace1/pkg1/pkg2/mod1.py b/bundle/jedi-vim/pythonx/jedi/test/completion/namespace1/pkg1/pkg2/mod1.py new file mode 100644 index 000000000..84d278381 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/namespace1/pkg1/pkg2/mod1.py @@ -0,0 +1 @@ +mod1_name = 'mod1' diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/namespace2/pkg1/pkg2/mod2.py b/bundle/jedi-vim/pythonx/jedi/test/completion/namespace2/pkg1/pkg2/mod2.py new file mode 100644 index 000000000..75dac3246 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/namespace2/pkg1/pkg2/mod2.py @@ -0,0 +1 @@ +mod2_name = 'mod2' diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/ns_path.py b/bundle/jedi-vim/pythonx/jedi/test/completion/ns_path.py new file mode 100644 index 000000000..419c1bd7a --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/ns_path.py @@ -0,0 +1,18 @@ +import sys +import os +from os.path import dirname + +sys.path.insert(0, os.path.join(dirname(__file__), 'namespace2')) +sys.path.insert(0, os.path.join(dirname(__file__), 'namespace1')) + +#? ['mod1'] +import pkg1.pkg2.mod1 + +#? ['mod2'] +import pkg1.pkg2.mod2 + +#? ['mod1_name'] +pkg1.pkg2.mod1.mod1_name + +#? ['mod2_name'] +pkg1.pkg2.mod2.mod2_name diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/on_import.py b/bundle/jedi-vim/pythonx/jedi/test/completion/on_import.py new file mode 100644 index 000000000..91524df8b --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/on_import.py @@ -0,0 +1,114 @@ +def from_names(): + #? ['mod1', 'base'] + from import_tree.pkg. + #? ['path'] + from os. + +def from_names_goto(): + from import_tree import pkg + #? pkg + from import_tree.pkg + +def builtin_test(): + #? ['math'] + import math + #? ['mmap'] + import mmap + +# ----------------- +# completions within imports +# ----------------- + +#? 
['sqlite3'] +import sqlite3 + +# classes is a local module that has an __init__.py and can therefore not be +# found. +#? [] +import classes + +#? ['timedelta'] +from datetime import timedel +#? 21 [] +from datetime.timedel import timedel + +# should not be possible, because names can only be looked up 1 level deep. +#? [] +from datetime.timedelta import resolution +#? [] +from datetime.timedelta import + +#? ['Cursor'] +from sqlite3 import Cursor + +#? ['some_variable'] +from . import some_variable +#? ['arrays'] +from . import arrays +#? [] +from . import import_tree as ren +#? [] +import json as + +import os +#? os.path.join +from os.path import join + +# ----------------- +# special positions -> edge cases +# ----------------- +import datetime + +#? 6 datetime +from datetime.time import time + +#? [] +import datetime. +#? [] +import datetime.date + +#? 21 ['import'] +from import_tree.pkg import pkg +#? 49 ['a', 'foobar', '__name__', '__doc__', '__file__', '__package__'] +from import_tree.pkg.mod1 import not_existant, # whitespace before +#? ['a', 'foobar', '__name__', '__doc__', '__file__', '__package__'] +from import_tree.pkg.mod1 import not_existant, +#? 22 ['mod1', 'base'] +from import_tree.pkg. import mod1 +#? 17 ['mod1', 'mod2', 'random', 'pkg', 'references', 'rename1', 'rename2', 'classes', 'globals', 'recurse_class1', 'recurse_class2', 'invisible_pkg', 'flow_import'] +from import_tree. import pkg + +#? 18 ['pkg'] +from import_tree.p import pkg + +#? 17 ['import_tree'] +from .import_tree import +#? 10 ['run'] +from ..run import +#? ['run'] +from ..run +#? 10 ['run'] +from ..run. +#? [] +from ..run. + +#? ['run'] +from .. import run + +#? [] +from not_a_module import + + +#137 +import json +#? 23 json.dump +from json import load, dump +#? 17 json.load +from json import load, dump +# without the from clause: +import json, datetime +#? 7 json +import json, datetime +#? 
13 datetime +import json, datetime + diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/ordering.py b/bundle/jedi-vim/pythonx/jedi/test/completion/ordering.py new file mode 100644 index 000000000..61eb19283 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/ordering.py @@ -0,0 +1,195 @@ +# ----------------- +# normal +# ----------------- +a = "" +a = 1 + +#? int() +a +#? [] +a.append + +a = list + +b = 1; b = "" +#? str() +b + +# temp should not be accessible before definition +#? [] +temp + +a = 1 +temp = b; +b = a +a = temp +#? int() +b +#? int() +b +#? str() +a + +a = tuple +if 1: + a = list + +#? ['append'] +a.append +#? ['index'] +a.index + +# ----------------- +# tuples exchanges +# ----------------- +a, b = 1, "" +#? int() +a +#? str() +b + +b, a = a, b +#? int() +b +#? str() +a + +b, a = a, b +#? int() +a +#? str() +b + +# ----------------- +# function +# ----------------- +def a(a=3): + #? int() + a + #? [] + a.func + return a + +#? int() +a(2) +#? [] +a(2).func + +a_param = 3 +def func(a_param): + # should not be int + #? [] + a_param. + +from os import path + + +# should not return a function, because `a` is a function above +def f(b, a): return a +#? [] +f(b=3). + +# ----------------- +# closure +# ----------------- + +def x(): + a = 0 + + def x(): + return a + + a = 3.0 + return x() + +#? float() +x() + +# ----------------- +# class +# ----------------- +class A(object): + a = "" + a = 3 + #? int() + a + a = list() + def __init__(self): + self.b = "" + + def before(self): + self.b = 3 + # TODO should this be so? include entries after cursor? + #? int() str() list + self.b + self.b = list + + self.a = 1 + #? str() int() + self.a + + #? ['after'] + self.after + + self.c = 3 + #? int() + self.c + + def after(self): + self.a = '' + + c = set() + +#? list() +A.a + +a = A() +#? ['after'] +a.after +#? [] +a.upper +#? [] +a.append +#? [] +a.real + +#? str() int() +a.a + +a = 3 +class a(): + def __init__(self, a): + self.a = a + +#? 
float() +a(1.0).a +#? +a().a + +# ----------------- +# imports +# ----------------- + +math = 3 +import math +#? ['cosh'] +math.cosh +#? [] +math.real + +math = 3 +#? int() +math +#? [] +math.cos + +# do the same for star imports +cosh = 3 +from math import * +# cosh doesn't work, but that's not a problem, star imports should be at the +# start of EVERY script! +cosh.real + +cosh = 3 +#? int() +cosh diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/parser.py b/bundle/jedi-vim/pythonx/jedi/test/completion/parser.py new file mode 100644 index 000000000..e0159cac4 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/parser.py @@ -0,0 +1,43 @@ +""" +Issues with the parser and not the type inference should be part of this file. +""" + +class IndentIssues(): + """ + issue jedi-vim#288 + Which is really a fast parser issue. It used to start a new block at the + parentheses, because it had problems with the indentation. + """ + def one_param( + self, + ): + return 1 + + def with_param( + self, + y): + return y + + + +#? int() +IndentIssues().one_param() + +#? str() +IndentIssues().with_param('') + + +""" +Just because there's a def keyword, doesn't mean it should not be able to +complete to definition. +""" +definition = 0 +#? ['definition'] +str(def + + +# It might be hard to determine the value +class Foo(object): + @property + #? 
['str'] + def bar(x=str diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/pep0484_basic.py b/bundle/jedi-vim/pythonx/jedi/test/completion/pep0484_basic.py new file mode 100644 index 000000000..e20fe4408 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/pep0484_basic.py @@ -0,0 +1,200 @@ +""" Pep-0484 type hinting """ + + +class A(): + pass + + +def function_parameters(a: A, b, c: str, d: int, e: str, f: str, g: int=4): + """ + :param e: if docstring and annotation agree, only one should be returned + :type e: str + :param f: if docstring and annotation disagree, both should be returned + :type f: int + """ + #? A() + a + #? + b + #? str() + c + #? int() + d + #? str() + e + #? str() + f + # int() + g + + +def return_unspecified(): + pass + +#? +return_unspecified() + + +def return_none() -> None: + """ + Return type None means the same as no return type as far as jedi + is concerned + """ + pass + +#? None +return_none() + + +def return_str() -> str: + pass + +#? str() +return_str() + + +def return_custom_class() -> A: + pass + +#? A() +return_custom_class() + + +def return_annotation_and_docstring() -> str: + """ + :rtype: int + """ + pass + +#? str() +return_annotation_and_docstring() + + +def return_annotation_and_docstring_different() -> str: + """ + :rtype: str + """ + pass + +#? str() +return_annotation_and_docstring_different() + + +def annotation_forward_reference(b: "B") -> "B": + #? B() + b + +#? ["test_element"] +annotation_forward_reference(1).t + +class B: + test_element = 1 + pass + +#? B() +annotation_forward_reference(1) + + +class SelfReference: + test_element = 1 + def test_method(self, x: "SelfReference") -> "SelfReference": + #? SelfReference() + x + #? ["test_element", "test_method"] + self.t + #? ["test_element", "test_method"] + x.t + #? ["test_element", "test_method"] + self.test_method(1).t + +#? 
SelfReference() +SelfReference().test_method() + +def function_with_non_pep_0484_annotation( + x: "I can put anything here", + xx: "", + yy: "\r\n\0;+*&^564835(---^&*34", + y: 3 + 3, + zz: float) -> int("42"): + # infers int from function call + #? int() + x + # infers int from function call + #? int() + xx + # infers int from function call + #? int() + yy + # infers str from function call + #? str() + y + #? float() + zz +#? +function_with_non_pep_0484_annotation(1, 2, 3, "force string") + +def function_forward_reference_dynamic( + x: return_str_type(), + y: "return_str_type()") -> None: + #? str() + x + #? str() + y + +def return_str_type(): + return str + + +X = str +def function_with_assined_class_in_reference(x: X, y: "Y"): + #? str() + x + #? int() + y +Y = int + +def just_because_we_can(x: "flo" + "at"): + #? float() + x + + +def keyword_only(a: str, *, b: str): + #? ['startswith'] + a.startswi + #? ['startswith'] + b.startswi + + +def argskwargs(*args: int, **kwargs: float): + """ + This might be a bit confusing, but is part of the standard. + args is changed to Tuple[int] in this case and kwargs to Dict[str, float], + which makes sense if you think about it a bit. + """ + #? tuple() + args + #? int() + args[0] + #? str() + next(iter(kwargs.keys())) + #? float() + kwargs[''] + + +class NotCalledClass: + def __init__(self, x): + self.x: int = x + self.y: int = '' + #? int() + self.x + #? int() + self.y + #? int() + self.y + self.z: int + self.z = '' + #? str() int() + self.z + self.w: float + #? float() + self.w diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/pep0484_comments.py b/bundle/jedi-vim/pythonx/jedi/test/completion/pep0484_comments.py new file mode 100644 index 000000000..902d1a15f --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/pep0484_comments.py @@ -0,0 +1,200 @@ +a = 3 # type: str +#? str() +a + +b = 3 # type: str but I write more +#? int() +b + +c = 3 # type: str # I comment more +#? 
str() +c + +d = "It should not read comments from the next line" +# type: int +#? str() +d + +# type: int +e = "It should not read comments from the previous line" +#? str() +e + +class BB: pass + +def test(a, b): + a = a # type: BB + c = a # type: str + d = a + # type: str + e = a # type: str # Should ignore long whitespace + + #? BB() + a + #? str() + c + #? BB() + d + #? str() + e + +class AA: + class BB: + pass + +def test(a): + # type: (AA.BB) -> None + #? AA.BB() + a + +def test(a): + # type: (AA.BB,) -> None + #? AA.BB() + a + +a,b = 1, 2 # type: str, float +#? str() +a +#? float() +b + +class Employee: + pass + +from typing import List, Tuple +x = [] # type: List[Employee] +#? Employee() +x[1] +x, y, z = [], [], [] # type: List[int], List[int], List[str] +#? int() +y[2] +x, y, z = [], [], [] # type: (List[float], List[float], List[BB]) +for zi in z: + #? BB() + zi + +x = [ + 1, + 2, +] # type: List[str] + +#? str() +x[1] + + +for bar in foo(): # type: str + #? str() + bar + +for bar, baz in foo(): # type: int, float + #? int() + bar + #? float() + baz + +for bar, baz in foo(): + # type: str, str + """ type hinting on next line should not work """ + #? + bar + #? + baz + +with foo(): # type: int + ... + +with foo() as f: # type: str + #? str() + f + +with foo() as f: + # type: str + """ type hinting on next line should not work """ + #? + f + +aaa = some_extremely_long_function_name_that_doesnt_leave_room_for_hints() \ + # type: float # We should be able to put hints on the next line with a \ +#? float() +aaa + +# Test instance methods +class Dog: + def __init__(self, age, friends, name): + # type: (int, List[Tuple[str, Dog]], str) -> None + #? int() + self.age = age + self.friends = friends + + #? Dog() + friends[0][1] + + #? 
str() + self.name = name + + def friend_for_name(self, name): + # type: (str) -> Dog + for (friend_name, friend) in self.friends: + if friend_name == name: + return friend + raise ValueError() + + def bark(self): + pass + +buddy = Dog(UNKNOWN_NAME1, UNKNOWN_NAME2, UNKNOWN_NAME3) +friend = buddy.friend_for_name('buster') +# type of friend is determined by function return type +#! 9 ['def bark'] +friend.bark() + +friend = buddy.friends[0][1] +# type of friend is determined by function parameter type +#! 9 ['def bark'] +friend.bark() + +# type is determined by function parameter type following nested generics +#? str() +friend.name + +# Mypy comment describing function return type. +def annot(): + # type: () -> str + pass + +#? str() +annot() + +# Mypy variable type annotation. +x = UNKNOWN_NAME2 # type: str + +#? str() +x + +class Cat(object): + def __init__(self, age, friends, name): + # type: (int, List[Dog], str) -> None + self.age = age + self.friends = friends + self.name = name + +cat = Cat(UNKNOWN_NAME4, UNKNOWN_NAME5, UNKNOWN_NAME6) +#? str() +cat.name + + +# Check potential errors +def x(a, b): + # type: ([) -> a + #? + a +def x(a, b): + # type: (1) -> a + #? + a +def x(a, b, c): + # type: (str) -> a + #? + b + #? 
+ c diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/pep0484_generic_mismatches.py b/bundle/jedi-vim/pythonx/jedi/test/completion/pep0484_generic_mismatches.py new file mode 100644 index 000000000..6f765c994 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/pep0484_generic_mismatches.py @@ -0,0 +1,339 @@ +import typing +from typing import ( + Callable, + Dict, + Generic, + List, + Sequence, + Tuple, + Type, + TypeVar, +) + +T = TypeVar('T') + + +def foo(x: T) -> T: + return x + + +class CustomGeneric(Generic[T]): + def __init__(self, val: T) -> None: + self.val = val + + +class PlainClass(object): + pass + + +tpl = ("1", 2) +tpl_typed = ("2", 3) # type: Tuple[str, int] + +collection = {"a": 1} +collection_typed = {"a": 1} # type: Dict[str, int] + +list_of_ints = [42] # type: List[int] +list_of_funcs = [foo] # type: List[Callable[[T], T]] + +custom_generic = CustomGeneric(123.45) + +plain_instance = PlainClass() + + +# Test that simple parameters are handled +def list_t_to_list_t(the_list: List[T]) -> List[T]: + return the_list + +x0 = list_t_to_list_t("abc")[0] +#? +x0 + +x1 = list_t_to_list_t(foo)[0] +#? +x1 + +x1 = list_t_to_list_t(typing)[0] +#? +x1 + +x2 = list_t_to_list_t(tpl)[0] +#? +x2 + +x3 = list_t_to_list_t(tpl_typed)[0] +#? +x3 + +x4 = list_t_to_list_t(collection)[0] +#? +x4 + +x5 = list_t_to_list_t(collection_typed)[0] +#? +x5 + +x6 = list_t_to_list_t(custom_generic)[0] +#? +x6 + +x7 = list_t_to_list_t(plain_instance)[0] +#? +x7 + +for a in list_t_to_list_t(12): + #? + a + + +# Test that simple parameters are handled +def list_type_t_to_list_t(the_list: List[Type[T]]) -> List[T]: + return [x() for x in the_list] + +x0 = list_type_t_to_list_t("abc")[0] +#? +x0 + +x1 = list_type_t_to_list_t(foo)[0] +#? +x1 + +x2 = list_type_t_to_list_t(tpl)[0] +#? +x2 + +x3 = list_type_t_to_list_t(tpl_typed)[0] +#? +x3 + +x4 = list_type_t_to_list_t(collection)[0] +#? +x4 + +x5 = list_type_t_to_list_t(collection_typed)[0] +#? 
+x5 + +x6 = list_type_t_to_list_t(custom_generic)[0] +#? +x6 + +x7 = list_type_t_to_list_t(plain_instance)[0] +#? +x7 + +for a in list_type_t_to_list_t(12): + #? + a + + +x0 = list_type_t_to_list_t(["abc"])[0] +#? +x0 + +x1 = list_type_t_to_list_t([foo])[0] +#? +x1 + +x2 = list_type_t_to_list_t([tpl])[0] +#? +x2 + +x3 = list_type_t_to_list_t([tpl_typed])[0] +#? +x3 + +x4 = list_type_t_to_list_t([collection])[0] +#? +x4 + +x5 = list_type_t_to_list_t([collection_typed])[0] +#? +x5 + +x6 = list_type_t_to_list_t([custom_generic])[0] +#? +x6 + +x7 = list_type_t_to_list_t([plain_instance])[0] +#? +x7 + +for a in list_type_t_to_list_t([12]): + #? + a + + +def list_func_t_to_list_t(the_list: List[Callable[[T], T]]) -> List[T]: + # Not actually a viable signature, but should be enough to test our handling + # of the generic parameters. + pass + + +x0 = list_func_t_to_list_t("abc")[0] +#? +x0 + +x1 = list_func_t_to_list_t(foo)[0] +#? +x1 + +x2 = list_func_t_to_list_t(tpl)[0] +#? +x2 + +x3 = list_func_t_to_list_t(tpl_typed)[0] +#? +x3 + +x4 = list_func_t_to_list_t(collection)[0] +#? +x4 + +x5 = list_func_t_to_list_t(collection_typed)[0] +#? +x5 + +x6 = list_func_t_to_list_t(custom_generic)[0] +#? +x6 + +x7 = list_func_t_to_list_t(plain_instance)[0] +#? +x7 + +for a in list_func_t_to_list_t(12): + #? + a + + +x0 = list_func_t_to_list_t(["abc"])[0] +#? +x0 + +x2 = list_func_t_to_list_t([tpl])[0] +#? +x2 + +x3 = list_func_t_to_list_t([tpl_typed])[0] +#? +x3 + +x4 = list_func_t_to_list_t([collection])[0] +#? +x4 + +x5 = list_func_t_to_list_t([collection_typed])[0] +#? +x5 + +x6 = list_func_t_to_list_t([custom_generic])[0] +#? +x6 + +x7 = list_func_t_to_list_t([plain_instance])[0] +#? +x7 + +for a in list_func_t_to_list_t([12]): + #? + a + + +def tuple_t(tuple_in: Tuple[T]]) -> Sequence[T]: + return tuple_in + + +x0 = list_t_to_list_t("abc")[0] +#? +x0 + +x1 = list_t_to_list_t(foo)[0] +#? +x1 + +x2 = list_t_to_list_t(tpl)[0] +#? +x2 + +x3 = list_t_to_list_t(tpl_typed)[0] +#? 
+x3 + +x4 = list_t_to_list_t(collection)[0] +#? +x4 + +x5 = list_t_to_list_t(collection_typed)[0] +#? +x5 + +x6 = list_t_to_list_t(custom_generic)[0] +#? +x6 + +x7 = list_t_to_list_t(plain_instance)[0] +#? +x7 + +for a in list_t_to_list_t(12): + #? + a + + +def tuple_t_elipsis(tuple_in: Tuple[T, ...]]) -> Sequence[T]: + return tuple_in + + +x0 = list_t_to_list_t("abc")[0] +#? +x0 + +x1 = list_t_to_list_t(foo)[0] +#? +x1 + +x2 = list_t_to_list_t(tpl)[0] +#? +x2 + +x3 = list_t_to_list_t(tpl_typed)[0] +#? +x3 + +x4 = list_t_to_list_t(collection)[0] +#? +x4 + +x5 = list_t_to_list_t(collection_typed)[0] +#? +x5 + +x6 = list_t_to_list_t(custom_generic)[0] +#? +x6 + +x7 = list_t_to_list_t(plain_instance)[0] +#? +x7 + +for a in list_t_to_list_t(12): + #? + a + + +def list_tuple_t_to_tuple_list_t(the_list: List[Tuple[T]]) -> Tuple[List[T], ...]: + return tuple(list(x) for x in the_list) + + +for b in list_tuple_t_to_tuple_list_t(list_of_ints): + #? + b[0] + + +def list_tuple_t_elipsis_to_tuple_list_t(the_list: List[Tuple[T, ...]]) -> Tuple[List[T], ...]: + return tuple(list(x) for x in the_list) + + +for b in list_tuple_t_to_tuple_list_t(list_of_ints): + #? + b[0] diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/pep0484_generic_parameters.py b/bundle/jedi-vim/pythonx/jedi/test/completion/pep0484_generic_parameters.py new file mode 100644 index 000000000..0be4e9cb2 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/pep0484_generic_parameters.py @@ -0,0 +1,389 @@ +from typing import ( + Callable, + Dict, + Generic, + Iterable, + List, + Mapping, + Optional, + Tuple, + Type, + TypeVar, + Union, + Sequence, +) + +K = TypeVar('K') +T = TypeVar('T') +T_co = TypeVar('T_co', covariant=True) +V = TypeVar('V') + + +just_float = 42. # type: float +optional_float = 42. # type: Optional[float] +list_of_ints = [42] # type: List[int] +list_of_floats = [42.] 
# type: List[float] +list_of_optional_floats = [x or None for x in list_of_floats] # type: List[Optional[float]] +list_of_ints_and_strs = [42, 'abc'] # type: List[Union[int, str]] + +# Test that simple parameters are handled +def list_t_to_list_t(the_list: List[T]) -> List[T]: + return the_list + +x0 = list_t_to_list_t(list_of_ints)[0] +#? int() +x0 + +for a in list_t_to_list_t(list_of_ints): + #? int() + a + +# Test that unions are handled +x2 = list_t_to_list_t(list_of_ints_and_strs)[0] +#? int() str() +x2 + +for z in list_t_to_list_t(list_of_ints_and_strs): + #? int() str() + z + + +list_of_int_type = [int] # type: List[Type[int]] + +# Test that nested parameters are handled +def list_optional_t_to_list_t(the_list: List[Optional[T]]) -> List[T]: + return [x for x in the_list if x is not None] + + +for xa in list_optional_t_to_list_t(list_of_optional_floats): + #? float() + xa + +# Under covariance rules this is strictly incorrect (because List is mutable, +# the function would be allowed to put `None`s into our List[float], which would +# be bad), however we don't expect jedi to enforce that. +for xa1 in list_optional_t_to_list_t(list_of_floats): + #? float() + xa1 + + +def optional_t_to_list_t(x: Optional[T]) -> List[T]: + return [x] if x is not None else [] + + +for xb in optional_t_to_list_t(optional_float): + #? float() + xb + + +for xb2 in optional_t_to_list_t(just_float): + #? float() + xb2 + + +def optional_list_t_to_list_t(x: Optional[List[T]]) -> List[T]: + return x if x is not None else [] + + +optional_list_float = None # type: Optional[List[float]] +for xc in optional_list_t_to_list_t(optional_list_float): + #? float() + xc + +for xc2 in optional_list_t_to_list_t(list_of_floats): + #? float() + xc2 + + +def list_type_t_to_list_t(the_list: List[Type[T]]) -> List[T]: + return [x() for x in the_list] + + +x1 = list_type_t_to_list_t(list_of_int_type)[0] +#? int() +x1 + + +for b in list_type_t_to_list_t(list_of_int_type): + #? 
int() + b + + +# Test construction of nested generic tuple return parameters +def list_t_to_list_tuple_t(the_list: List[T]) -> List[Tuple[T]]: + return [(x,) for x in the_list] + + +x1t = list_t_to_list_tuple_t(list_of_ints)[0][0] +#? int() +x1t + + +for c1 in list_t_to_list_tuple_t(list_of_ints): + #? int() + c1[0] + + +for c2, in list_t_to_list_tuple_t(list_of_ints): + #? int() + c2 + + +# Test handling of nested tuple input parameters +def list_tuple_t_to_tuple_list_t(the_list: List[Tuple[T]]) -> Tuple[List[T], ...]: + return tuple(list(x) for x in the_list) + + +list_of_int_tuples = [(x,) for x in list_of_ints] # type: List[Tuple[int]] + +for b in list_tuple_t_to_tuple_list_t(list_of_int_tuples): + #? int() + b[0] + + +def list_tuple_t_elipsis_to_tuple_list_t(the_list: List[Tuple[T, ...]]) -> Tuple[List[T], ...]: + return tuple(list(x) for x in the_list) + + +list_of_int_tuple_elipsis = [tuple(list_of_ints)] # type: List[Tuple[int, ...]] + +for b in list_tuple_t_elipsis_to_tuple_list_t(list_of_int_tuple_elipsis): + #? int() + b[0] + + +# Test handling of nested callables +def foo(x: int) -> int: + return x + + +list_of_funcs = [foo] # type: List[Callable[[int], int]] + +def list_func_t_to_list_func_type_t(the_list: List[Callable[[T], T]]) -> List[Callable[[Type[T]], T]]: + def adapt(func: Callable[[T], T]) -> Callable[[Type[T]], T]: + def wrapper(typ: Type[T]) -> T: + return func(typ()) + return wrapper + return [adapt(x) for x in the_list] + + +for b in list_func_t_to_list_func_type_t(list_of_funcs): + #? int() + b(int) + + +def bar(*a, **k) -> int: + return len(a) + len(k) + + +list_of_funcs_2 = [bar] # type: List[Callable[..., int]] + +def list_func_t_passthrough(the_list: List[Callable[..., T]]) -> List[Callable[..., T]]: + return the_list + + +for b in list_func_t_passthrough(list_of_funcs_2): + #? 
int() + b(None, x="x") + + +mapping_int_str = {42: 'a'} # type: Dict[int, str] + +# Test that mappings (that have more than one parameter) are handled +def invert_mapping(mapping: Mapping[K, V]) -> Mapping[V, K]: + return {v: k for k, v in mapping.items()} + +#? int() +invert_mapping(mapping_int_str)['a'] + + +# Test that the right type is chosen when a mapping is passed to something with +# only a single parameter. This checks that our inheritance checking picks the +# right thing. +def first(iterable: Iterable[T]) -> T: + return next(iter(iterable)) + +#? int() +first(mapping_int_str) + +# Test inference of str as an iterable of str. +#? str() +first("abc") + +some_str = NotImplemented # type: str +#? str() +first(some_str) + +annotated = [len] # type: List[ Callable[[Sequence[float]], int] ] +#? int() +first(annotated)() + +# Test that the right type is chosen when a partially realised mapping is expected +def values(mapping: Mapping[int, T]) -> List[T]: + return list(mapping.values()) + +#? str() +values(mapping_int_str)[0] + +x2 = values(mapping_int_str)[0] +#? str() +x2 + +for b in values(mapping_int_str): + #? str() + b + + +# +# Tests that user-defined generic types are handled +# +list_ints = [42] # type: List[int] + +class CustomGeneric(Generic[T_co]): + def __init__(self, val: T_co) -> None: + self.val = val + + +# Test extraction of type from a custom generic type +def custom(x: CustomGeneric[T]) -> T: + return x.val + +custom_instance = CustomGeneric(42) # type: CustomGeneric[int] + +#? int() +custom(custom_instance) + +x3 = custom(custom_instance) +#? int() +x3 + + +# Test construction of a custom generic type +def wrap_custom(iterable: Iterable[T]) -> List[CustomGeneric[T]]: + return [CustomGeneric(x) for x in iterable] + +#? int() +wrap_custom(list_ints)[0].val + +x4 = wrap_custom(list_ints)[0] +#? int() +x4.val + +for x5 in wrap_custom(list_ints): + #? 
int() + x5.val + + +# Test extraction of type from a nested custom generic type +list_custom_instances = [CustomGeneric(42)] # type: List[CustomGeneric[int]] + +def unwrap_custom(iterable: Iterable[CustomGeneric[T]]) -> List[T]: + return [x.val for x in iterable] + +#? int() +unwrap_custom(list_custom_instances)[0] + +x6 = unwrap_custom(list_custom_instances)[0] +#? int() +x6 + +for x7 in unwrap_custom(list_custom_instances): + #? int() + x7 + + +for xc in unwrap_custom([CustomGeneric(s) for s in 'abc']): + #? str() + xc + + +for xg in unwrap_custom(CustomGeneric(s) for s in 'abc'): + #? str() + xg + + +# Test extraction of type from type parameer nested within a custom generic type +custom_instance_list_int = CustomGeneric([42]) # type: CustomGeneric[List[int]] + +def unwrap_custom2(instance: CustomGeneric[Iterable[T]]) -> List[T]: + return list(instance.val) + +#? int() +unwrap_custom2(custom_instance_list_int)[0] + +x8 = unwrap_custom2(custom_instance_list_int)[0] +#? int() +x8 + +for x9 in unwrap_custom2(custom_instance_list_int): + #? int() + x9 + + +# Test that classes which have generic parents but are not generic themselves +# are still inferred correctly. +class Specialised(Mapping[int, str]): + pass + + +specialised_instance = NotImplemented # type: Specialised + +#? int() +first(specialised_instance) + +#? str() +values(specialised_instance)[0] + + +# Test that classes which have generic ancestry but neither they nor their +# parents are not generic are still inferred correctly. +class ChildOfSpecialised(Specialised): + pass + + +child_of_specialised_instance = NotImplemented # type: ChildOfSpecialised + +#? int() +first(child_of_specialised_instance) + +#? str() +values(child_of_specialised_instance)[0] + + +# Test that unbound generics are inferred as much as possible +class CustomPartialGeneric1(Mapping[str, T]): + pass + + +custom_partial1_instance = NotImplemented # type: CustomPartialGeneric1[int] + +#? 
str() +first(custom_partial1_instance) + + +custom_partial1_unbound_instance = NotImplemented # type: CustomPartialGeneric1 + +#? str() +first(custom_partial1_unbound_instance) + + +class CustomPartialGeneric2(Mapping[T, str]): + pass + + +custom_partial2_instance = NotImplemented # type: CustomPartialGeneric2[int] + +#? int() +first(custom_partial2_instance) + +#? str() +values(custom_partial2_instance)[0] + + +custom_partial2_unbound_instance = NotImplemented # type: CustomPartialGeneric2 + +#? [] +first(custom_partial2_unbound_instance) + +#? str() +values(custom_partial2_unbound_instance)[0] diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/pep0484_generic_passthroughs.py b/bundle/jedi-vim/pythonx/jedi/test/completion/pep0484_generic_passthroughs.py new file mode 100644 index 000000000..7c7b88209 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/pep0484_generic_passthroughs.py @@ -0,0 +1,351 @@ +from typing import ( + Any, + Callable, + Iterable, + List, + Sequence, + Tuple, + Type, + TypeVar, + Union, + Generic, +) + +T = TypeVar('T') +U = TypeVar('U') +TList = TypeVar('TList', bound=List[Any]) +TType = TypeVar('TType', bound=Type) +TTypeAny = TypeVar('TTypeAny', bound=Type[Any]) +TCallable = TypeVar('TCallable', bound=Callable[..., Any]) + +untyped_list_str = ['abc', 'def'] +typed_list_str = ['abc', 'def'] # type: List[str] + +untyped_tuple_str = ('abc',) +typed_tuple_str = ('abc',) # type: Tuple[str] + +untyped_tuple_str_int = ('abc', 4) +typed_tuple_str_int = ('abc', 4) # type: Tuple[str, int] + +variadic_tuple_str = ('abc',) # type: Tuple[str, ...] +variadic_tuple_str_int = ('abc', 4) # type: Tuple[Union[str, int], ...] 
+ + +def untyped_passthrough(x): + return x + +def typed_list_generic_passthrough(x: List[T]) -> List[T]: + return x + +def typed_tuple_generic_passthrough(x: Tuple[T]) -> Tuple[T]: + return x + +def typed_multi_typed_tuple_generic_passthrough(x: Tuple[T, U]) -> Tuple[U, T]: + return x[1], x[0] + +def typed_variadic_tuple_generic_passthrough(x: Tuple[T, ...]) -> Sequence[T]: + return x + +def typed_iterable_generic_passthrough(x: Iterable[T]) -> Iterable[T]: + return x + +def typed_fully_generic_passthrough(x: T) -> T: + return x + +def typed_bound_generic_passthrough(x: TList) -> TList: + #? list() + x + + return x + +# Forward references are more likely with custom types, however this aims to +# test just the handling of the quoted type rather than any other part of the +# machinery. +def typed_quoted_return_generic_passthrough(x: T) -> 'List[T]': + return [x] + +def typed_quoted_input_generic_passthrough(x: 'Tuple[T]') -> T: + x + return x[0] + + +for a in untyped_passthrough(untyped_list_str): + #? str() + a + +for b in untyped_passthrough(typed_list_str): + #? str() + b + + +for c in typed_list_generic_passthrough(untyped_list_str): + #? str() + c + +for d in typed_list_generic_passthrough(typed_list_str): + #? str() + d + + +for e in typed_iterable_generic_passthrough(untyped_list_str): + #? str() + e + +for f in typed_iterable_generic_passthrough(typed_list_str): + #? str() + f + + +for g in typed_tuple_generic_passthrough(untyped_tuple_str): + #? str() + g + +for h in typed_tuple_generic_passthrough(typed_tuple_str): + #? str() + h + + +out_untyped = typed_multi_typed_tuple_generic_passthrough(untyped_tuple_str_int) +#? int() +out_untyped[0] +#? str() +out_untyped[1] + + +out_typed = typed_multi_typed_tuple_generic_passthrough(typed_tuple_str_int) +#? int() +out_typed[0] +#? str() +out_typed[1] + + +for j in typed_variadic_tuple_generic_passthrough(untyped_tuple_str_int): + #? 
str() int() + j + +for k in typed_variadic_tuple_generic_passthrough(typed_tuple_str_int): + #? str() int() + k + +for l in typed_variadic_tuple_generic_passthrough(variadic_tuple_str): + #? str() + l + +for m in typed_variadic_tuple_generic_passthrough(variadic_tuple_str_int): + #? str() int() + m + +#? float +typed_fully_generic_passthrough(float) + +for n in typed_fully_generic_passthrough(untyped_list_str): + #? str() + n + +for o in typed_fully_generic_passthrough(typed_list_str): + #? str() + o + + +for p in typed_bound_generic_passthrough(untyped_list_str): + #? str() + p + +for q in typed_bound_generic_passthrough(typed_list_str): + #? str() + q + + +for r in typed_quoted_return_generic_passthrough("something"): + #? str() + r + +for s in typed_quoted_return_generic_passthrough(42): + #? int() + s + + +#? str() +typed_quoted_input_generic_passthrough(("something",)) + +#? int() +typed_quoted_input_generic_passthrough((42,)) + + + +class CustomList(List): + def get_first(self): + return self[0] + + +#? str() +CustomList[str]()[0] +#? str() +CustomList[str]().get_first() + +#? str() +typed_fully_generic_passthrough(CustomList[str]())[0] +#? +typed_list_generic_passthrough(CustomList[str])[0] + + +def typed_bound_type_implicit_any_generic_passthrough(x: TType) -> TType: + #? Type() + x + return x + +def typed_bound_type_any_generic_passthrough(x: TTypeAny) -> TTypeAny: + # Should be Type(), though we don't get the handling of the nested argument + # to `Type[...]` quite right here. + x + return x + + +class MyClass: + pass + +def my_func(a: str, b: int) -> float: + pass + +#? MyClass +typed_fully_generic_passthrough(MyClass) + +#? MyClass() +typed_fully_generic_passthrough(MyClass()) + +#? my_func +typed_fully_generic_passthrough(my_func) + +#? CustomList() +typed_bound_generic_passthrough(CustomList[str]()) + +# should be list(), but we don't validate generic typevar upper bounds +#? int() +typed_bound_generic_passthrough(42) + +#? 
MyClass +typed_bound_type_implicit_any_generic_passthrough(MyClass) + +#? MyClass +typed_bound_type_any_generic_passthrough(MyClass) + +# should be Type(), but we don't validate generic typevar upper bounds +#? int() +typed_bound_type_implicit_any_generic_passthrough(42) + +# should be Type(), but we don't validate generic typevar upper bounds +#? int() +typed_bound_type_any_generic_passthrough(42) + + +def decorator(fn: TCallable) -> TCallable: + pass + + +def will_be_decorated(the_param: complex) -> float: + pass + + +is_decorated = decorator(will_be_decorated) + +#? will_be_decorated +is_decorated + +#? ['the_param='] +is_decorated(the_para +) + + +class class_decorator_factory_plain: + def __call__(self, func: T) -> T: + ... + +#? class_decorator_factory_plain() +class_decorator_factory_plain() + +#? +class_decorator_factory_plain()() + +is_decorated_by_class_decorator_factory = class_decorator_factory_plain()(will_be_decorated) + +#? will_be_decorated +is_decorated_by_class_decorator_factory + +#? ['the_param='] +is_decorated_by_class_decorator_factory(the_par +) + + +def decorator_factory_plain() -> Callable[[T], T]: + pass + +#? Callable() +decorator_factory_plain() + +#? +decorator_factory_plain()() + +#? int() +decorator_factory_plain()(42) + +is_decorated_by_plain_factory = decorator_factory_plain()(will_be_decorated) + +#? will_be_decorated +is_decorated_by_plain_factory + +#? ['the_param='] +is_decorated_by_plain_factory(the_par +) + + +class class_decorator_factory_bound_callable: + def __call__(self, func: TCallable) -> TCallable: + ... + +#? class_decorator_factory_bound_callable() +class_decorator_factory_bound_callable() + +#? Callable() +class_decorator_factory_bound_callable()() + +is_decorated_by_class_bound_factory = class_decorator_factory_bound_callable()(will_be_decorated) + +#? will_be_decorated +is_decorated_by_class_bound_factory + +#? 
['the_param='] +is_decorated_by_class_bound_factory(the_par +) + + +def decorator_factory_bound_callable() -> Callable[[TCallable], TCallable]: + pass + +#? Callable() +decorator_factory_bound_callable() + +#? Callable() +decorator_factory_bound_callable()() + +is_decorated_by_bound_factory = decorator_factory_bound_callable()(will_be_decorated) + +#? will_be_decorated +is_decorated_by_bound_factory + +#? ['the_param='] +is_decorated_by_bound_factory(the_par +) + + +class That(Generic[T]): + def __init__(self, items: List[Tuple[str, T]]) -> None: + pass + + def get(self) -> T: + pass + +inst = That([("abc", 2)]) + +# No completions here, but should have completions for `int` +#? int() +inst.get() diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/pep0484_overload.py b/bundle/jedi-vim/pythonx/jedi/test/completion/pep0484_overload.py new file mode 100644 index 000000000..22c677045 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/pep0484_overload.py @@ -0,0 +1,89 @@ +# python >= 3.6 +from typing import List, Dict, overload, Tuple, TypeVar + +lst: list +list_alias: List +list_str: List[str] +list_str: List[int] + +# ------------------------- +# With base classes +# ------------------------- + +@overload +def overload_f2(value: List) -> str: ... +@overload +def overload_f2(value: Dict) -> int: ... + +#? str() +overload_f2(['']) +#? int() +overload_f2({1.0: 1.0}) +#? str() +overload_f2(lst) +#? str() +overload_f2(list_alias) +#? str() +overload_f2(list_str) + + +@overload +def overload_f3(value: list) -> str: ... +@overload +def overload_f3(value: dict) -> float: ... + +#? str() +overload_f3(['']) +#? float() +overload_f3({1.0: 1.0}) +#? str() +overload_f3(lst) +#? str() +overload_f3(list_alias) +#? str() +overload_f3(list_str) + +# ------------------------- +# Generics Matching +# ------------------------- + +@overload +def overload_f1(value: List[str]) -> str: ... + + +@overload +def overload_f1(value: Dict[str, str]) -> Dict[str, str]: ... 
+ +def overload_f1(): + pass + +#? str() +overload_f1(['']) +#? str() dict() +overload_f1(1) +#? dict() +overload_f1({'': ''}) + +#? str() dict() +overload_f1(lst) +#? str() dict() +overload_f1(list_alias) +#? str() +overload_f1(list_str) +#? str() dict() +overload_f1(list_int) + +# ------------------------- +# Broken Matching +# ------------------------- +T = TypeVar('T') + +@overload +def broken_f1(value: 1) -> str: ... + +@overload +def broken_f1(value: Tuple[T]) -> Tuple[T]: ... + +tup: Tuple[float] +#? float() +broken_f1(broken_f1(tup))[0] diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/pep0484_typing.py b/bundle/jedi-vim/pythonx/jedi/test/completion/pep0484_typing.py new file mode 100644 index 000000000..dae4b9b3f --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/pep0484_typing.py @@ -0,0 +1,594 @@ +""" +Test the typing library, with docstrings and annotations +""" +import typing +class B: + pass + +def we_can_has_sequence(p, q, r, s, t, u): + """ + :type p: typing.Sequence[int] + :type q: typing.Sequence[B] + :type r: typing.Sequence[int] + :type s: typing.Sequence["int"] + :type t: typing.MutableSequence[dict] + :type u: typing.List[float] + """ + #? ["count"] + p.c + #? int() + p[1] + #? ["count"] + q.c + #? B() + q[1] + #? ["count"] + r.c + #? int() + r[1] + #? ["count"] + s.c + #? int() + s[1] + #? [] + s.a + #? ["append"] + t.a + #? dict() + t[1] + #? ["append"] + u.a + #? float() list() + u[1.0] + #? float() + u[1] + +def iterators(ps, qs, rs, ts): + """ + :type ps: typing.Iterable[int] + :type qs: typing.Iterator[str] + :type rs: typing.Sequence["ForwardReference"] + :type ts: typing.AbstractSet["float"] + """ + for p in ps: + #? int() + p + #? + next(ps) + a, b = ps + #? int() + a + ##? 
int() --- TODO fix support for tuple assignment + # https://github.com/davidhalter/jedi/pull/663#issuecomment-172317854 + # test below is just to make sure that in case it gets fixed by accident + # these tests will be fixed as well the way they should be + #? + b + + for q in qs: + #? str() + q + #? str() + next(qs) + for r in rs: + #? ForwardReference() + r + #? + next(rs) + for t in ts: + #? float() + t + +def sets(p, q): + """ + :type p: typing.AbstractSet[int] + :type q: typing.MutableSet[float] + """ + #? [] + p.a + #? ["add"] + q.a + +def tuple(p, q, r): + """ + :type p: typing.Tuple[int] + :type q: typing.Tuple[int, str, float] + :type r: typing.Tuple[B, ...] + """ + #? int() + p[0] + #? ['index'] + p.index + #? int() + q[0] + #? str() + q[1] + #? float() + q[2] + #? B() + r[0] + #? B() + r[1] + #? B() + r[2] + #? B() + r[10000] + i, s, f = q + #? int() + i + #? str() + s + #? float() + f + +class Key: + pass + +class Value: + pass + +def mapping(p, q, d, dd, r, s, t): + """ + :type p: typing.Mapping[Key, Value] + :type q: typing.MutableMapping[Key, Value] + :type d: typing.Dict[Key, Value] + :type dd: typing.DefaultDict[Key, Value] + :type r: typing.KeysView[Key] + :type s: typing.ValuesView[Value] + :type t: typing.ItemsView[Key, Value] + """ + #? [] + p.setd + #? ["setdefault"] + q.setd + #? ["setdefault"] + d.setd + #? ["setdefault"] + dd.setd + #? Value() + p[1] + for key in p: + #? Key() + key + for key in p.keys(): + #? Key() + key + for value in p.values(): + #? Value() + value + for item in p.items(): + #? Key() + item[0] + #? Value() + item[1] + (key, value) = item + #? Key() + key + #? Value() + value + for key, value in p.items(): + #? Key() + key + #? Value() + value + for key, value in q.items(): + #? Key() + key + #? Value() + value + for key, value in d.items(): + #? Key() + key + #? Value() + value + for key, value in dd.items(): + #? Key() + key + #? Value() + value + for key in r: + #? Key() + key + for value in s: + #? 
Value() + value + for key, value in t: + #? Key() + key + #? Value() + value + +def union(p, q, r, s, t): + """ + :type p: typing.Union[int] + :type q: typing.Union[int, int] + :type r: typing.Union[int, str, "int"] + :type s: typing.Union[int, typing.Union[str, "typing.Union['float', 'dict']"]] + :type t: typing.Union[int, None] + """ + #? int() + p + #? int() + q + #? int() str() + r + #? int() str() float() dict() + s + #? int() None + t + +def optional(p): + """ + :type p: typing.Optional[int] + Optional does not do anything special. However it should be recognised + as being of that type. Jedi doesn't do anything with the extra into that + it can be None as well + """ + #? int() None + p + +class ForwardReference: + pass + +class TestDict(typing.Dict[str, int]): + def setdud(self): + pass + +def testdict(x): + """ + :type x: TestDict + """ + #? ["setdud", "setdefault"] + x.setd + for key in x.keys(): + #? str() + key + for value in x.values(): + #? int() + value + +x = TestDict() +#? ["setdud", "setdefault"] +x.setd +for key in x.keys(): + #? str() + key +for value in x.values(): + #? int() + value + +WrappingType = typing.NewType('WrappingType', str) # Chosen arbitrarily +y = WrappingType(0) # Per https://github.com/davidhalter/jedi/issues/1015#issuecomment-355795929 +#? str() +y + +def testnewtype(y): + """ + :type y: WrappingType + """ + #? str() + y + #? ["upper"] + y.u + +WrappingType2 = typing.NewType() + +def testnewtype2(y): + """ + :type y: WrappingType2 + """ + #? + y + #? [] + y. + +# The type of a NewType is equivalent to the type of its underlying type. +MyInt = typing.NewType('MyInt', int) +x = type(MyInt) +#? type.mro +x.mro + +PlainInt = int +y = type(PlainInt) +#? type.mro +y.mro + +class TestDefaultDict(typing.DefaultDict[str, int]): + def setdud(self): + pass + +def testdict(x): + """ + :type x: TestDefaultDict + """ + #? ["setdud", "setdefault"] + x.setd + for key in x.keys(): + #? str() + key + for value in x.values(): + #? 
int() + value + +x = TestDefaultDict() +#? ["setdud", "setdefault"] +x.setd +for key in x.keys(): + #? str() + key +for value in x.values(): + #? int() + value + + +""" +docstrings have some auto-import, annotations can use all of Python's +import logic +""" +import typing as t +def union2(x: t.Union[int, str]): + #? int() str() + x +from typing import Union +def union3(x: Union[int, str]): + #? int() str() + x + +from typing import Union as U +def union4(x: U[int, str]): + #? int() str() + x + +#? typing.Optional +typing.Optional[0] + +# ------------------------- +# Type Vars +# ------------------------- + +TYPE_VARX = typing.TypeVar('TYPE_VARX') +TYPE_VAR_CONSTRAINTSX = typing.TypeVar('TYPE_VAR_CONSTRAINTSX', str, int) +#? ['__class__'] +TYPE_VARX.__clas +#! ["TYPE_VARX = typing.TypeVar('TYPE_VARX')"] +TYPE_VARX + + +class WithTypeVar(typing.Generic[TYPE_VARX]): + def lala(self) -> TYPE_VARX: + ... + + +def maaan(p: WithTypeVar[int]): + #? int() + p.lala() + +def in_out1(x: TYPE_VARX) -> TYPE_VARX: ... + +#? int() +in_out1(1) +#? str() +in_out1("") +#? str() +in_out1(str()) +#? +in_out1() + +def type_in_out1(x: typing.Type[TYPE_VARX]) -> TYPE_VARX: ... + +#? int() +type_in_out1(int) +#? str() +type_in_out1(str) +#? float() +type_in_out1(float) +#? +type_in_out1() + +def in_out2(x: TYPE_VAR_CONSTRAINTSX) -> TYPE_VAR_CONSTRAINTSX: ... + +#? int() +in_out2(1) +#? str() +in_out2("") +#? str() +in_out2(str()) +#? str() int() +in_out2() +# TODO this should actually be str() int(), because of the constraints. +#? float() +in_out2(1.0) + +def type_in_out2(x: typing.Type[TYPE_VAR_CONSTRAINTSX]) -> TYPE_VAR_CONSTRAINTSX: ... + +#? int() +type_in_out2(int) +#? str() +type_in_out2(str) +#? str() int() +type_in_out2() +# TODO this should actually be str() int(), because of the constraints. +#? float() +type_in_out2(float) + +def ma(a: typing.Callable[[str], TYPE_VARX]) -> typing.Callable[[str], TYPE_VARX]: + #? 
typing.Callable() + return a + +def mf(s: str) -> int: + return int(s) + +#? int() +ma(mf)('2') + +def xxx(x: typing.Iterable[TYPE_VARX]) -> typing.Tuple[str, TYPE_VARX]: ... + +#? str() +xxx([0])[0] +#? int() +xxx([0])[1] +#? +xxx([0])[2] + +def call_pls() -> typing.Callable[[TYPE_VARX], TYPE_VARX]: ... +#? int() +call_pls()(1) + +def call2_pls() -> typing.Callable[[str, typing.Callable[[int], TYPE_VARX]], TYPE_VARX]: ... +#? float() +call2_pls('')(1, lambda x: 3.0) + +def call3_pls() -> typing.Callable[[typing.Callable[[int], TYPE_VARX]], typing.List[TYPE_VARX]]: ... +def the_callable() -> float: ... +#? float() +call3_pls()(the_callable)[0] + +def call4_pls(fn: typing.Callable[..., TYPE_VARX]) -> typing.Callable[..., TYPE_VARX]: + return "" + +#? int() +call4_pls(lambda x: 1)() + +# ------------------------- +# TYPE_CHECKING +# ------------------------- + +if typing.TYPE_CHECKING: + with_type_checking = 1 +else: + without_type_checking = 1.0 +#? int() +with_type_checking +#? +without_type_checking + +def foo(a: typing.List, b: typing.Dict, c: typing.MutableMapping) -> typing.Type[int]: + #? ['append'] + a.appen + #? list() + a + #? + a[0] + #? ['setdefault'] + b.setd + #? ['setdefault'] + c.setd + #? typing.MutableMapping() + c + #? + c['asdf'] +#? int +foo() + +# ------------------------- +# cast +# ------------------------- + +def cast_tests(): + x = 3.0 + y = typing.cast(int, x) + #? int() + y + return typing.cast(str, x) + + +#? str() +cast_tests() + + +# ------------------------- +# dynamic +# ------------------------- + +def dynamic_annotation(x: int): + #? int() + return x + +#? int() +dynamic_annotation('') + +# ------------------------- +# TypeDict +# ------------------------- + +# python >= 3.8 + +class Foo(typing.TypedDict): + foo: str + bar: typing.List[float] + an_int: int + #! ['foo: str'] + foo + #? str() + foo + #? 
int() + an_int + +def typed_dict_test_foo(arg: Foo): + a_string = arg['foo'] + a_list_of_floats = arg['bar'] + an_int = arg['an_int'] + + #? str() + a_string + #? list() + a_list_of_floats + #? float() + a_list_of_floats[0] + #? int() + an_int + + #? ['isupper'] + a_string.isuppe + #? ['pop'] + a_list_of_floats.po + #? ['as_integer_ratio'] + an_int.as_integer_rati + +#! ['class Foo'] +d: Foo +#? str() +d['foo'] +#? float() +d['bar'][0] +#? +d['baz'] + +#? +d.foo +#? +d.bar +#! [] +d.foo + +#? [] +Foo.set +#? ['setdefault'] +d.setdefaul +#? [] +Foo.setdefaul + +#? 5 ["'foo"] +d['fo'] +#? 5 ['"bar"'] +d["bar"] + +class Bar(Foo): + another_variable: int + + #? int() + another_variable + #? + an_int + +def typed_dict_test_foo(arg: Bar): + #? str() + arg['foo'] + #? list() + arg['bar'] + #? float() + arg['bar'][0] + #? int() + arg['an_int'] + #? int() + arg['another_variable'] diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/pep0526_variables.py b/bundle/jedi-vim/pythonx/jedi/test/completion/pep0526_variables.py new file mode 100644 index 000000000..391683395 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/pep0526_variables.py @@ -0,0 +1,115 @@ +""" +PEP 526 introduced a new way of using type annotations on variables. It was +introduced in Python 3.6. +""" +# python >= 3.6 + +import typing + +asdf = '' +asdf: int +# This is not necessarily correct, but for now this is ok (at least no error). +#? int() +asdf + + +direct: int = NOT_DEFINED +#? int() +direct + +with_typing_module: typing.List[float] = NOT_DEFINED +#? float() +with_typing_module[0] + +somelist = [1, 2, 3, "A", "A"] +element : int +for element in somelist: + #? int() + element + +test_string: str = NOT_DEFINED +#? str() +test_string + + +char: str +for char in NOT_DEFINED: + #? str() + char + + +# ------------------------- +# instance/class vars +# ------------------------- + +class Foo(): + bar: int + baz: typing.ClassVar[str] + + +#? +Foo.bar +#? int() +Foo().bar +#? 
str() +Foo.baz +#? str() +Foo().baz + +class VarClass: + var_instance1: int = '' + var_instance2: float + var_class1: typing.ClassVar[str] = 1 + var_class2: typing.ClassVar[bytes] + + def __init__(self): + #? int() + d.var_instance1 + #? float() + d.var_instance2 + #? str() + d.var_class1 + #? bytes() + d.var_class2 + #? [] + d.int + #? ['var_class1', 'var_class2', 'var_instance1', 'var_instance2'] + self.var_ + + +#? ['var_class1', 'var_class2', 'var_instance1'] +VarClass.var_ +#? int() +VarClass.var_instance1 +#? +VarClass.var_instance2 +#? str() +VarClass.var_class1 +#? bytes() +VarClass.var_class2 +#? [] +VarClass.int + +d = VarClass() +#? ['var_class1', 'var_class2', 'var_instance1', 'var_instance2'] +d.var_ +#? int() +d.var_instance1 +#? float() +d.var_instance2 +#? str() +d.var_class1 +#? bytes() +d.var_class2 +#? [] +d.int + + + +import dataclasses +@dataclasses.dataclass +class DC: + name: int = 1 + +#? int() +DC().name diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/positional_only_params.py b/bundle/jedi-vim/pythonx/jedi/test/completion/positional_only_params.py new file mode 100644 index 000000000..3c1108d91 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/positional_only_params.py @@ -0,0 +1,35 @@ +# python >= 3.8 + +def positional_only_call(a, /, b): + #? str() + a + #? int() + b + if UNDEFINED: + return a + else: + return b + + +#? int() str() +positional_only_call('', 1) + + +def positional_only_call2(a, /, b=3): + if UNDEFINED: + return a + else: + return b + +#? int() +positional_only_call2(1) +#? int() +positional_only_call2(SOMETHING_UNDEFINED) +#? str() +positional_only_call2(SOMETHING_UNDEFINED, '') + +# Maybe change this? Because it's actually not correct +#? int() str() +positional_only_call2(a=1, b='') +#? 
tuple str() +positional_only_call2(b='', a=tuple) diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/precedence.py b/bundle/jedi-vim/pythonx/jedi/test/completion/precedence.py new file mode 100644 index 000000000..71c66e1fe --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/precedence.py @@ -0,0 +1,192 @@ +""" +Test Jedi's operation understanding. Jedi should understand simple additions, +multiplications, etc. +""" +# ----------------- +# numbers +# ----------------- +x = [1, 'a', 1.0] + +#? int() str() float() +x[12] + +#? float() +x[1 + 1] + +index = 0 + 1 + +#? str() +x[index] + +#? int() +x[1 + (-1)] + +def calculate(number): + return number + constant + +constant = 1 + +#? float() +x[calculate(1)] + +def calculate(number): + return number + constant + +# ----------------- +# strings +# ----------------- + +x = 'upp' + 'e' + +#? str.upper +getattr(str, x + 'r') + +a = "a"*3 +#? str() +a +a = 3 * "a" +#? str() +a + +a = 3 * "a" +#? str() +a + +#? int() +(3 ** 3) +#? int() +(3 ** 'a') +#? int() +(3 + 'a') +#? bool() +(3 == 'a') +#? bool() +(3 >= 'a') + +class X(): + foo = 2 +#? int() +(X.foo ** 3) + +# ----------------- +# assignments +# ----------------- + +x = [1, 'a', 1.0] + +i = 0 +i += 1 +i += 1 +#? float() +x[i] + +i = 1 +i += 1 +i -= 3 +i += 1 +#? int() +x[i] + +# ----------------- +# in +# ----------------- + +if 'X' in 'Y': + a = 3 +else: + a = '' +# For now don't really check for truth values. So in should return both +# results. +#? str() int() +a + +if 'X' not in 'Y': + b = 3 +else: + b = '' +# For now don't really check for truth values. So in should return both +# results. +#? str() int() +b + +# ----------------- +# for flow assignments +# ----------------- + +class FooBar(object): + fuu = 0.1 + raboof = 'fourtytwo' + +# targets should be working +target = '' +for char in ['f', 'u', 'u']: + target += char +#? 
float() +getattr(FooBar, target) + +# github #24 +target = u'' +for char in reversed(['f', 'o', 'o', 'b', 'a', 'r']): + target += char + +#? str() +getattr(FooBar, target) + + +# ----------------- +# repetition problems -> could be very slow and memory expensive - shouldn't +# be. +# ----------------- + +b = [str(1)] +l = list +for x in [l(0), l(1), l(2), l(3), l(4), l(5), l(6), l(7), l(8), l(9), l(10), + l(11), l(12), l(13), l(14), l(15), l(16), l(17), l(18), l(19), l(20), + l(21), l(22), l(23), l(24), l(25), l(26), l(27), l(28), l(29)]: + b += x + +#? str() +b[1] + + +# ----------------- +# undefined names +# ----------------- +a = foobarbaz + 'hello' + +#? int() float() +{'hello': 1, 'bar': 1.0}[a] + +# ----------------- +# stubs +# ----------------- + +from datetime import datetime, timedelta + +#? +(datetime - timedelta) +#? datetime() +(datetime() - timedelta()) +#? timedelta() +(datetime() - datetime()) +#? timedelta() +(timedelta() - datetime()) +#? timedelta() +(timedelta() - timedelta()) + +# ----------------- +# magic methods +# ----------------- + +class C: + def __sub__(self, other) -> int: ... + def __radd__(self, other) -> float: ... + +#? int() +(C() - object()) +#? C() object() +(object() - C()) +#? C() object() +(C() + object()) +#? float() +(object() + C()) diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/pytest.py b/bundle/jedi-vim/pythonx/jedi/test/completion/pytest.py new file mode 100644 index 000000000..a900dcdad --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/pytest.py @@ -0,0 +1,185 @@ +from typing import Generator + +import pytest +from pytest import fixture + + +@pytest.fixture(scope='module') +def my_fixture() -> str: + pass + + +@fixture +def my_simple_fixture(): + return 1 + + +@fixture +def my_yield_fixture(): + yield 1 + + +@fixture +class MyClassFixture(): + pass + +# ----------------- +# goto/infer +# ----------------- + +#! 
18 ['def my_conftest_fixture'] +def test_x(my_conftest_fixture, my_fixture, my_not_existing_fixture, my_yield_fixture): + #? str() + my_fixture + #? int() + my_yield_fixture + #? + my_not_existing_fixture + #? float() + return my_conftest_fixture + +#? 18 float() +def test_x(my_conftest_fixture, my_fixture): + pass + + +#! 18 ['param MyClassFixture'] +def test_x(MyClassFixture): + #? + MyClassFixture + +#? 15 +def lala(my_fixture): + pass + +@pytest.fixture +#? 15 str() +def lala(my_fixture): + pass + +#! 15 ['param my_fixture'] +def lala(my_fixture): + pass + +@pytest.fixture +#! 15 ['def my_fixture'] +def lala(my_fixture): + pass + +# overriding types of a fixture should be possible +def test_x(my_yield_fixture: str): + #? str() + my_yield_fixture + +# ----------------- +# completion +# ----------------- + +#? 34 ['my_fixture'] +def test_x(my_simple_fixture, my_fixture): + return +#? 34 ['my_fixture'] +def test_x(my_simple_fixture, my_fixture): + return +#? ['my_fixture'] +def test_x(my_simple_fixture, my_f + return +#? 18 ['my_simple_fixture'] +def test_x(my_simple_fixture): + return +#? ['my_simple_fixture'] +def test_x(my_simp + return +#? ['my_conftest_fixture'] +def test_x(my_con + return +#? 18 ['my_conftest_fixture'] +def test_x(my_conftest_fixture): + return + +#? [] +def lala(my_con + return + +@pytest.fixture +#? ['my_conftest_fixture'] +def lala(my_con + return + +@pytest.fixture +#? 15 ['my_conftest_fixture'] +def lala(my_con): + return + +@pytest.fixture +@some_decorator +#? ['my_conftest_fixture'] +def lala(my_con + return + +@pytest.fixture +@some_decorator +#? 15 ['my_conftest_fixture'] +def lala(my_con): + return + +# ----------------- +# pytest owned fixtures +# ----------------- + +#? ['monkeypatch'] +def test_p(monkeyp + + +#! 15 ['def monkeypatch'] +def test_p(monkeypatch): + #? ['setattr'] + monkeypatch.setatt + +#? ['capsysbinary'] +def test_p(capsysbin + +#? 
['tmpdir', 'tmpdir_factory'] +def test_p(tmpdi + + +def close_parens(): + pass +# ----------------- +# inheritance +# ----------------- + +@fixture +#? 40 ['inheritance_fixture'] +def inheritance_fixture(inheritance_fixture): + #? str() + inheritance_fixture + #? ['upper'] + inheritance_fixture.upper + return 1 + + +#! 48 ['def inheritance_fixture'] +def test_inheritance_fixture(inheritance_fixture, caplog): + #? int() + inheritance_fixture + + #? ['set_level'] + caplog.set_le + + +@pytest.fixture +def caplog(caplog): + yield caplog + +# ----------------- +# Generator with annotation +# ----------------- + +@pytest.fixture +def with_annot() -> Generator[float, None, None]: + pass + +def test_with_annot(inheritance_fixture, with_annot): + #? float() + with_annot diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/recursion.py b/bundle/jedi-vim/pythonx/jedi/test/completion/recursion.py new file mode 100644 index 000000000..e595497f7 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/recursion.py @@ -0,0 +1,117 @@ +""" +Code that might cause recursion issues (or has caused in the past). +""" + +def Recursion(): + def recurse(self): + self.a = self.a + self.b = self.b.recurse() + +#? +Recursion().a + +#? +Recursion().b + + +class X(): + def __init__(self): + self.recursive = [1, 3] + + def annoying(self): + self.recursive = [self.recursive[0]] + + def recurse(self): + self.recursive = [self.recursive[1]] + +#? int() +X().recursive[0] + + +def to_list(iterable): + return list(set(iterable)) + + +def recursion1(foo): + return to_list(to_list(foo)) + recursion1(foo) + +#? int() +recursion1([1,2])[0] + + +class FooListComp(): + def __init__(self): + self.recursive = [1] + + def annoying(self): + self.recursive = [x for x in self.recursive] + + +#? 
int() +FooListComp().recursive[0] + + +class InstanceAttributeIfs: + def b(self): + self.a1 = 1 + self.a2 = 1 + + def c(self): + self.a2 = '' + + def x(self): + self.b() + + if self.a1 == 1: + self.a1 = self.a1 + 1 + if self.a2 == UNDEFINED: + self.a2 = self.a2 + 1 + + #? int() + self.a1 + #? int() str() + self.a2 + +#? int() +InstanceAttributeIfs().a1 +#? int() str() +InstanceAttributeIfs().a2 + + + +class A: + def a(self, b): + for x in [self.a(i) for i in b]: + #? + x + +class B: + def a(self, b): + for i in b: + for i in self.a(i): + #? + yield i + + +foo = int +foo = foo # type: foo +#? int +foo + +while True: + bar = int + bar = bar # type: bar + #? int() + bar + + +class Comprehension: + def __init__(self, foo): + self.foo = foo + + def update(self): + self.foo = (self.foo,) + + +#? int() tuple() +Comprehension(1).foo[0] diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/stdlib.py b/bundle/jedi-vim/pythonx/jedi/test/completion/stdlib.py new file mode 100644 index 000000000..94eb53280 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/stdlib.py @@ -0,0 +1,476 @@ +""" +std library stuff +""" + +# ----------------- +# builtins +# ----------------- +arr = [''] + +#? str() +sorted(arr)[0] + +#? str() +next(reversed(arr)) +next(reversed(arr)) + +# should not fail if there's no return value. +def yielder(): + yield None + +#? None +next(reversed(yielder())) + +# empty reversed should not raise an error +#? +next(reversed()) + +#? str() bytes() +next(open('')) + +#? int() +{'a':2}.setdefault('a', 3) + +# Compiled classes should have the meta class attributes. +#? ['__itemsize__'] +tuple.__itemsize__ +#? [] +tuple().__itemsize__ + +# ----------------- +# type() calls with one parameter +# ----------------- +#? int +type(1) +#? int +type(int()) +#? type +type(int) +#? type +type(type) +#? list +type([]) + +def x(): + yield 1 +generator = type(x()) +#? generator +type(x for x in []) +#? type(x) +type(lambda: x) + +import math +import os +#? 
type(os) +type(math) +class X(): pass +#? type +type(X) + +# ----------------- +# type() calls with multiple parameters +# ----------------- + +X = type('X', (object,), dict(a=1)) + +# Doesn't work yet. +#? +X.a +#? +X + +if os.path.isfile(): + #? ['abspath'] + fails = os.path.abspath + +# The type vars and other underscored things from typeshed should not be +# findable. +#? +os._T + + +with open('foo') as f: + for line in f.readlines(): + #? str() bytes() + line +# ----------------- +# enumerate +# ----------------- +for i, j in enumerate(["as", "ad"]): + #? int() + i + #? str() + j + +# ----------------- +# re +# ----------------- +import re +c = re.compile(r'a') +# re.compile should not return str -> issue #68 +#? [] +c.startswith +#? int() +c.match().start() + +#? int() +re.match(r'a', 'a').start() + +for a in re.finditer('a', 'a'): + #? int() + a.start() + +# ----------------- +# ref +# ----------------- +import weakref + +#? int() +weakref.proxy(1) + +#? weakref.ref() +weakref.ref(1) +#? int() None +weakref.ref(1)() + +# ----------------- +# sqlite3 (#84) +# ----------------- + +import sqlite3 +#? sqlite3.Connection() +con = sqlite3.connect() +#? sqlite3.Cursor() +c = con.cursor() + +def huhu(db): + """ + :type db: sqlite3.Connection + :param db: the db connection + """ + #? sqlite3.Connection() + db + +with sqlite3.connect() as c: + #? sqlite3.Connection() + c + +# ----------------- +# hashlib +# ----------------- + +import hashlib + +#? ['md5'] +hashlib.md5 + +# ----------------- +# copy +# ----------------- + +import copy +#? int() +copy.deepcopy(1) + +#? +copy.copy() + +# ----------------- +# json +# ----------------- + +# We don't want any results for json, because it depends on IO. +import json +#? +json.load('asdf') +#? +json.loads('[1]') + +# ----------------- +# random +# ----------------- + +import random +class A(object): + def say(self): pass +class B(object): + def shout(self): pass +cls = random.choice([A, B]) +#? 
['say', 'shout'] +cls().s + +# ----------------- +# random +# ----------------- + +import zipfile +z = zipfile.ZipFile("foo") +#? ['upper'] +z.read('name').upper + +# ----------------- +# contextlib +# ----------------- + +from typing import Iterator +import contextlib +with contextlib.closing('asd') as string: + #? str() + string + +@contextlib.contextmanager +def cm1() -> Iterator[float]: + yield 1 +with cm1() as x: + #? float() + x + +@contextlib.contextmanager +def cm2() -> float: + yield 1 +with cm2() as x: + #? + x + +@contextlib.contextmanager +def cm3(): + yield 3 +with cm3() as x: + #? int() + x + +# ----------------- +# operator +# ----------------- + +import operator + +f = operator.itemgetter(1) +#? float() +f([1.0]) +#? str() +f([1, '']) + +g = operator.itemgetter(1, 2) +x1, x2 = g([1, 1.0, '']) +#? float() +x1 +#? str() +x2 + +x1, x2 = g([1, '']) +#? str() +x1 +#? int() str() +x2 + +# ----------------- +# shlex +# ----------------- + +# Github issue #929 +import shlex +qsplit = shlex.split("foo, ferwerwerw werw werw e") +for part in qsplit: + #? str() + part + +# ----------------- +# staticmethod, classmethod params +# ----------------- + +class F(): + def __init__(self): + self.my_variable = 3 + + @staticmethod + def my_func(param): + #? [] + param.my_ + #? ['upper'] + param.uppe + #? str() + return param + + @staticmethod + def my_func_without_call(param): + #? [] + param.my_ + #? [] + param.uppe + #? + return param + + @classmethod + def my_method_without_call(cls, param): + #? + cls.my_variable + #? ['my_method', 'my_method_without_call'] + cls.my_meth + #? + return param + + @classmethod + def my_method(cls, param): + #? + cls.my_variable + #? ['my_method', 'my_method_without_call'] + cls.my_meth + #? + return param + +#? str() +F.my_func('') +#? 
str() +F.my_method('') + +# ----------------- +# Unknown metaclass +# ----------------- + +# Github issue 1321 +class Meta(object): + pass + +class Test(metaclass=Meta): + def test_function(self): + result = super(Test, self).test_function() + #? [] + result. + +# ----------------- +# Enum +# ----------------- + +import enum + +class X(enum.Enum): + attr_x = 3 + attr_y = 2.0 + +#? ['mro'] +X.mro +#? ['attr_x', 'attr_y'] +X.attr_ +#? str() +X.attr_x.name +#? int() +X.attr_x.value +#? str() +X.attr_y.name +#? float() +X.attr_y.value +#? str() +X().name +#? float() +X().attr_x.attr_y.value + +# ----------------- +# functools +# ----------------- +import functools + +basetwo = functools.partial(int, base=2) +#? int() +basetwo() + +def function(a, b): + return a, b +a = functools.partial(function, 0) + +#? int() +a('')[0] +#? str() +a('')[1] + +kw = functools.partial(function, b=1.0) +tup = kw(1) +#? int() +tup[0] +#? float() +tup[1] + +def my_decorator(f): + @functools.wraps(f) + def wrapper(*args, **kwds): + return f(*args, **kwds) + return wrapper + +@my_decorator +def example(a): + return a + +#? str() +example('') + +# From GH #1574 +#? float() +functools.wraps(functools.partial(str, 1))(lambda: 1.0)() + +class X: + def function(self, a, b): + return a, b + a = functools.partialmethod(function, 0) + kw = functools.partialmethod(function, b=1.0) + just_partial = functools.partial(function, 1, 2.0) + +#? int() +X().a('')[0] +#? str() +X().a('')[1] + +# The access of partialmethods on classes are not 100% correct. This doesn't +# really matter, because nobody uses it like that anyway and would take quite a +# bit of work to fix all of these cases. +#? str() +X.a('')[0] +#? +X.a('')[1] + +#? X() +X.a(X(), '')[0] +#? str() +X.a(X(), '')[1] + +tup = X().kw(1) +#? int() +tup[0] +#? float() +tup[1] + +tup = X.kw(1) +#? +tup[0] +#? float() +tup[1] + +tup = X.kw(X(), 1) +#? int() +tup[0] +#? float() +tup[1] + +#? float() +X.just_partial('')[0] +#? 
str() +X.just_partial('')[1] +#? float() +X().just_partial('')[0] +#? str() +X().just_partial('')[1] + +# python >= 3.8 + +@functools.lru_cache +def x() -> int: ... +@functools.lru_cache() +def y() -> float: ... +@functools.lru_cache(8) +def z() -> str: ... + +#? int() +x() +#? float() +y() +#? str() +z() diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/stub_only.pyi b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/stub_only.pyi new file mode 100644 index 000000000..8dc159000 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/stub_only.pyi @@ -0,0 +1,9 @@ +in_stub_only: int + + +class Foo(Bar): + pass + + +class Bar: + pass diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/stub_only_folder/__init__.pyi b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/stub_only_folder/__init__.pyi new file mode 100644 index 000000000..77cf2466a --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/stub_only_folder/__init__.pyi @@ -0,0 +1 @@ +in_stub_only_folder: int diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/stub_only_folder/nested_stub_only.pyi b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/stub_only_folder/nested_stub_only.pyi new file mode 100644 index 000000000..4f2f42392 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/stub_only_folder/nested_stub_only.pyi @@ -0,0 +1 @@ +in_stub_only: int diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/stub_only_folder/nested_with_stub.py b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/stub_only_folder/nested_with_stub.py new file mode 100644 index 000000000..0f9111f49 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/stub_only_folder/nested_with_stub.py @@ -0,0 +1,2 @@ +in_python = '' +in_both = '' diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/stub_only_folder/nested_with_stub.pyi 
b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/stub_only_folder/nested_with_stub.pyi new file mode 100644 index 000000000..53dc26536 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/stub_only_folder/nested_with_stub.pyi @@ -0,0 +1,2 @@ +in_stub: int +in_both: float diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/stub_only_folder/python_only.py b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/stub_only_folder/python_only.py new file mode 100644 index 000000000..23370e0c8 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/stub_only_folder/python_only.py @@ -0,0 +1 @@ +in_python = '' diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/with_stub.py b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/with_stub.py new file mode 100644 index 000000000..a471fa911 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/with_stub.py @@ -0,0 +1,9 @@ +in_with_stub_both = 5 +in_with_stub_python = 8 + + +def stub_function(x: float, y): + """ + Python docstring + """ + return 1 diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/with_stub.pyi b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/with_stub.pyi new file mode 100644 index 000000000..973528cc3 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/with_stub.pyi @@ -0,0 +1,6 @@ +in_with_stub_both: str +in_with_stub_stub: float + + +def stub_function(x: int, y: float) -> str: + pass diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/with_stub_folder/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/with_stub_folder/__init__.py new file mode 100644 index 000000000..4201289b4 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/with_stub_folder/__init__.py @@ -0,0 +1,2 @@ +in_with_stub_both_folder = 5 +in_with_stub_python_folder = 8 diff --git 
a/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/with_stub_folder/__init__.pyi b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/with_stub_folder/__init__.pyi new file mode 100644 index 000000000..ea7ec38c4 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/with_stub_folder/__init__.pyi @@ -0,0 +1,2 @@ +in_with_stub_both_folder: str +in_with_stub_stub_folder: float diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/with_stub_folder/nested_stub_only.pyi b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/with_stub_folder/nested_stub_only.pyi new file mode 100644 index 000000000..6b19f90b4 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/with_stub_folder/nested_stub_only.pyi @@ -0,0 +1,4 @@ +if 1: + in_stub_only: int +else: + in_stub_only: int diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/with_stub_folder/nested_with_stub.py b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/with_stub_folder/nested_with_stub.py new file mode 100644 index 000000000..0f9111f49 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/with_stub_folder/nested_with_stub.py @@ -0,0 +1,2 @@ +in_python = '' +in_both = '' diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/with_stub_folder/nested_with_stub.pyi b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/with_stub_folder/nested_with_stub.pyi new file mode 100644 index 000000000..53dc26536 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/with_stub_folder/nested_with_stub.pyi @@ -0,0 +1,2 @@ +in_stub: int +in_both: float diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/with_stub_folder/python_only.py b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/with_stub_folder/python_only.py new file mode 100644 index 000000000..23370e0c8 --- /dev/null +++ 
b/bundle/jedi-vim/pythonx/jedi/test/completion/stub_folder/with_stub_folder/python_only.py @@ -0,0 +1 @@ +in_python = '' diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/stubs.py b/bundle/jedi-vim/pythonx/jedi/test/completion/stubs.py new file mode 100644 index 000000000..da9523720 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/stubs.py @@ -0,0 +1,104 @@ +from stub_folder import with_stub, stub_only, with_stub_folder, stub_only_folder + +# ------------------------- +# Just files +# ------------------------- + +#? int() +stub_only.in_stub_only +#? str() +with_stub.in_with_stub_both +#? int() +with_stub.in_with_stub_python +#? float() +with_stub.in_with_stub_stub + +#! ['in_stub_only: int'] +stub_only.in_stub_only +#! ['in_with_stub_both = 5'] +with_stub.in_with_stub_both +#! ['in_with_stub_python = 8'] +with_stub.in_with_stub_python +#! ['in_with_stub_stub: float'] +with_stub.in_with_stub_stub + +#? ['in_stub_only'] +stub_only.in_ +#? ['in_stub_only'] +from stub_folder.stub_only import in_ +#? ['in_with_stub_both', 'in_with_stub_python', 'in_with_stub_stub'] +with_stub.in_ +#? ['in_with_stub_both', 'in_with_stub_python', 'in_with_stub_stub'] +from stub_folder.with_stub import in_ + +#? ['with_stub', 'stub_only', 'with_stub_folder', 'stub_only_folder'] +from stub_folder. + + +# ------------------------- +# Folders +# ------------------------- + +#? int() +stub_only_folder.in_stub_only_folder +#? str() +with_stub_folder.in_with_stub_both_folder +#? int() +with_stub_folder.in_with_stub_python_folder +#? float() +with_stub_folder.in_with_stub_stub_folder + +#? ['in_stub_only_folder'] +stub_only_folder.in_ +#? ['in_with_stub_both_folder', 'in_with_stub_python_folder', 'in_with_stub_stub_folder'] +with_stub_folder.in_ + +# ------------------------- +# Folders nested with stubs +# ------------------------- + +from stub_folder.with_stub_folder import nested_stub_only, nested_with_stub, \ + python_only + +#? int() +nested_stub_only.in_stub_only +#? 
float() +nested_with_stub.in_both +#? str() +nested_with_stub.in_python +#? int() +nested_with_stub.in_stub +#? str() +python_only.in_python + +#? ['in_stub_only_folder'] +stub_only_folder.in_ +#? ['in_with_stub_both_folder', 'in_with_stub_python_folder', 'in_with_stub_stub_folder'] +with_stub_folder.in_ +#? ['in_python'] +python_only.in_ + +# ------------------------- +# Folders nested with stubs +# ------------------------- + +from stub_folder.stub_only_folder import nested_stub_only, nested_with_stub, \ + python_only + +#? int() +nested_stub_only.in_stub_only +#? float() +nested_with_stub.in_both +#? str() +nested_with_stub.in_python +#? int() +nested_with_stub.in_stub +#? str() +python_only.in_python + +#? ['in_stub_only'] +nested_stub_only.in_ +#? ['in_both', 'in_python', 'in_stub'] +nested_with_stub.in_ +#? ['in_python'] +python_only.in_ diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/sys_path.py b/bundle/jedi-vim/pythonx/jedi/test/completion/sys_path.py new file mode 100644 index 000000000..8bcde9d20 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/sys_path.py @@ -0,0 +1,24 @@ + +import sys +import os +from os.path import dirname + +sys.path.insert(0, '../../jedi') +sys.path.append(os.path.join(dirname(__file__), 'thirdparty')) + +# modifications, that should fail: +# syntax err +sys.path.append('a' +* '/thirdparty') + +#? ['inference'] +import inference + +#? ['inference_state_function_cache'] +inference.inference_state_fu + +# Those don't work because dirname and abspath are not properly understood. +#? ['jedi_'] +import jedi_ + +#? ['el'] +jedi_.el diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/thirdparty/PyQt4_.py b/bundle/jedi-vim/pythonx/jedi/test/completion/thirdparty/PyQt4_.py new file mode 100644 index 000000000..f4e41837c --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/thirdparty/PyQt4_.py @@ -0,0 +1,19 @@ +from PyQt4.QtCore import * +from PyQt4.QtGui import * + +#? 
['QActionGroup'] +QActionGroup + +#? ['currentText'] +QStyleOptionComboBox().currentText + +#? [] +QStyleOptionComboBox().currentText. + +from PyQt4 import QtGui + +#? ['currentText'] +QtGui.QStyleOptionComboBox().currentText + +#? [] +QtGui.QStyleOptionComboBox().currentText. diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/thirdparty/jedi_.py b/bundle/jedi-vim/pythonx/jedi/test/completion/thirdparty/jedi_.py new file mode 100644 index 000000000..f588f7407 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/thirdparty/jedi_.py @@ -0,0 +1,52 @@ + +from jedi import functions, inference, parsing + +el = functions.complete()[0] +#? ['description'] +el.description + +#? str() +el.description + + +scopes, path, dot, like = \ + api._prepare_goto(source, row, column, path, True) + +# has problems with that (sometimes) very deep nesting. +#? set() +el = scopes + +# get_names_for_scope is also recursion stuff +#? tuple() +el = list(inference.get_names_for_scope())[0] + +#? int() parsing.Module() +el = list(inference.get_names_for_scope(1))[0][0] +#? parsing.Module() +el = list(inference.get_names_for_scope())[0][0] + +#? list() +el = list(inference.get_names_for_scope(1))[0][1] +#? list() +el = list(inference.get_names_for_scope())[0][1] + +#? list() +parsing.Scope((0,0)).get_set_vars() +#? parsing.Import() parsing.Name() +parsing.Scope((0,0)).get_set_vars()[0] +# TODO access parent is not possible, because that is not set in the class +## parsing.Class() +parsing.Scope((0,0)).get_set_vars()[0].parent + +#? parsing.Import() parsing.Name() +el = list(inference.get_names_for_scope())[0][1][0] + +#? inference.Array() inference.Class() inference.Function() inference.Instance() +list(inference.follow_call())[0] + +# With the right recursion settings, this should be possible (and maybe more): +# Array Class Function Generator Instance Module +# However, this was produced with the recursion settings 10/350/10000, and +# lasted 18.5 seconds. 
So we just have to be content with the results. +#? inference.Class() inference.Function() +inference.get_scopes_for_name()[0] diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/thirdparty/psycopg2_.py b/bundle/jedi-vim/pythonx/jedi/test/completion/thirdparty/psycopg2_.py new file mode 100644 index 000000000..834704b3a --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/thirdparty/psycopg2_.py @@ -0,0 +1,11 @@ +import psycopg2 + +conn = psycopg2.connect('dbname=test') + +#? ['cursor'] +conn.cursor + +cur = conn.cursor() + +#? ['fetchall'] +cur.fetchall diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/thirdparty/pylab_.py b/bundle/jedi-vim/pythonx/jedi/test/completion/thirdparty/pylab_.py new file mode 100644 index 000000000..ab132a450 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/thirdparty/pylab_.py @@ -0,0 +1,36 @@ +import pylab + +# two gotos +#! ['module numpy'] +import numpy + +#! ['module random'] +import numpy.random + +#? ['array2string'] +numpy.array2string + +#? ['shape'] +numpy.matrix().shape + +#? ['random_integers'] +pylab.random_integers + +#? [] +numpy.random_integers + +#? ['random_integers'] +numpy.random.random_integers +#? ['sample'] +numpy.random.sample + +import numpy +na = numpy.array([1,2]) +#? ['shape'] +na.shape + +# shouldn't raise an error #29, jedi-vim +# doesn't return something, because matplotlib uses __import__ +fig = pylab.figure() +#? +fig.add_subplot diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/types.py b/bundle/jedi-vim/pythonx/jedi/test/completion/types.py new file mode 100644 index 000000000..e67be4e10 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/types.py @@ -0,0 +1,187 @@ +# ----------------- +# non array +# ----------------- + +#? ['imag'] +int.imag + +#? [] +int.is_integer + +#? ['is_integer'] +float.is_int + +#? ['is_integer'] +1.0.is_integer + +#? ['upper'] +"".upper + +#? 
['upper'] +r"".upper + +# strangely this didn't work, because the = is used for assignments +#? ['upper'] +"=".upper +a = "=" +#? ['upper'] +a.upper + + +# ----------------- +# lists +# ----------------- +arr = [] +#? ['append'] +arr.app + +#? ['append'] +list().app +#? ['append'] +[].append + +arr2 = [1,2,3] +#? ['append'] +arr2.app + +#? int() +arr.count(1) + +x = [] +#? +x.pop() +x = [3] +#? int() +x.pop() +x = [] +x.append(1.0) +#? float() +x.pop() + +# ----------------- +# dicts +# ----------------- +dic = {} + +#? ['copy', 'clear'] +dic.c + +dic2 = dict(a=1, b=2) +#? ['pop', 'popitem'] +dic2.p +#? ['popitem'] +{}.popitem + +dic2 = {'asdf': 3} +#? ['popitem'] +dic2.popitem + +#? int() +dic2['asdf'] + +d = {'a': 3, 1.0: list} + +#? int() list +d.values()[0] +##? int() list +dict(d).values()[0] + +#? str() +d.items()[0][0] +#? int() +d.items()[0][1] + +(a, b), = {a:1 for a in [1.0]}.items() +#? float() +a +#? int() +b + +# ----------------- +# tuples +# ----------------- +tup = ('',2) + +#? ['count'] +tup.c + +tup2 = tuple() +#? ['index'] +tup2.i +#? ['index'] +().i + +tup3 = 1,"" +#? ['index'] +tup3.index + +tup4 = 1,"" +#? ['index'] +tup4.index + +# ----------------- +# set +# ----------------- +set_t = {1,2} + +#? ['clear', 'copy'] +set_t.c + +set_t2 = set() + +#? ['clear', 'copy'] +set_t2.c + +# ----------------- +# pep 448 unpacking generalizations +# ----------------- + +d = {'a': 3} +dc = {v: 3 for v in ['a']} + +#? dict() +{**d} + +#? dict() +{**dc} + +#? str() +{**d, "b": "b"}["b"] + +#? str() +{**dc, "b": "b"}["b"] + +# Should resolve to int() but jedi is not smart enough yet +# Here to make sure it doesn't result in crash though +#? +{**d}["a"] + +# Should resolve to int() but jedi is not smart enough yet +# Here to make sure it doesn't result in crash though +#? +{**dc}["a"] + +s = {1, 2, 3} + +#? set() +{*s} + +#? 
set() +{*s, 4, *s} + +s = {1, 2, 3} +# Should resolve to int() but jedi is not smart enough yet +# Here to make sure it doesn't result in crash though +#? +{*s}.pop() + +#? int() +{*s, 4}.pop() + +# Should resolve to int() but jedi is not smart enough yet +# Here to make sure it doesn't result in crash though +#? +[*s][0] + +#? int() +[*s, 4][0] diff --git a/bundle/jedi-vim/pythonx/jedi/test/completion/usages.py b/bundle/jedi-vim/pythonx/jedi/test/completion/usages.py new file mode 100644 index 000000000..8afffb40e --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/completion/usages.py @@ -0,0 +1,394 @@ +""" +Renaming tests. This means search for references. +I always leave a little bit of space to add room for additions, because the +results always contain position informations. +""" +#< 4 (0,4), (3,0), (5,0), (12,4), (14,5), (15,0), (17,0), (19,0) +def abcd(): pass + +#< 0 (-3,4), (0,0), (2,0), (9,4), (11,5), (12,0), (14,0), (16,0) +abcd.d.a.bsaasd.abcd.d + +abcd +# unicode chars shouldn't be a problem. +x['smörbröd'].abcd + +# With the new parser these statements are not recognized as stateents, because +# they are not valid Python. 
+if 1: + abcd = +else: + (abcd) = +abcd = +#< (-17,4), (-14,0), (-12,0), (0,0), (2,0), (-2,0), (-3,5), (-5,4) +abcd + +abcd = 5 + + +Abc = 3 + +#< 6 (-3,0), (0,6), (2,4), (5,8), (17,0) +class Abc(): + #< (-5,0), (-2,6), (0,4), (2,8), (3,8), (15,0) + Abc + + def Abc(self): + Abc; self.c = 3 + + #< 17 (0,16), (2,8) + def a(self, Abc): + #< 10 (-2,16), (0,8) + Abc + + #< 19 (0,18), (2,8) + def self_test(self): + #< 12 (-2,18), (0,8) + self.b + +Abc.d.Abc + + +#< 4 (0,4), (5,1) +def blubi(): + pass + + +#< (-5,4), (0,1) +@blubi +def a(): pass + + +#< 0 (0,0), (1,0) +set_object_var = object() +set_object_var.var = 1 + +def func(a, b): + a = 12 + #< 4 (0,4), (3,8) + c = a + if True: + #< 8 (-3,4), (0,8) + c = b + +response = 5 +#< 0 (-2,0), (0,0), (1,0), (2,0), (4,0) +response = HttpResponse(mimetype='application/pdf') +response['Content-Disposition'] = 'attachment; filename=%s.pdf' % id +response.write(pdf) +#< (-6,0), (-4,0), (-3,0), (-2,0), (0,0) +response + + +# ----------------- +# imports +# ----------------- +#< (0,7), (3,0) +import module_not_exists + +#< (-3,7), (0,0) +module_not_exists + + +#< ('import_tree.rename1', 1,0), (0,24), (3,0), (6,17), ('import_tree.rename2', 4,17), (11,17), (14,17), ('imports', 72, 16) +from import_tree import rename1 + +#< (0,8), ('import_tree.rename1',3,0), ('import_tree.rename2',4,32), ('import_tree.rename2',6,0), (3,32), (8,32), (5,0) +rename1.abc + +#< (-3,8), ('import_tree.rename1', 3,0), ('import_tree.rename2', 4,32), ('import_tree.rename2', 6,0), (0,32), (5,32), (2,0) +from import_tree.rename1 import abc +#< (-5,8), (-2,32), ('import_tree.rename1', 3,0), ('import_tree.rename2', 4,32), ('import_tree.rename2', 6,0), (0,0), (3,32) +abc + +#< 20 ('import_tree.rename1', 1,0), ('import_tree.rename2', 4,17), (-11,24), (-8,0), (-5,17), (0,17), (3,17), ('imports', 72, 16) +from import_tree.rename1 import abc + +#< (0, 32), +from import_tree.rename1 import not_existing + +# Shouldn't raise an error or do anything weird. 
+from not_existing import * + +# ----------------- +# classes +# ----------------- + +class TestMethods(object): + #< 8 (0,8), (2,13) + def a_method(self): + #< 13 (-2,8), (0,13) + self.a_method() + #< 13 (2,8), (0,13), (3,13) + self.b_method() + + def b_method(self): + self.b_method + + +class TestClassVar(object): + #< 4 (0,4), (5,13), (7,21) + class_v = 1 + def a(self): + class_v = 1 + + #< (-5,4), (0,13), (2,21) + self.class_v + #< (-7,4), (-2,13), (0,21) + TestClassVar.class_v + #< (0,8), (-7, 8) + class_v + +class TestInstanceVar(): + def a(self): + #< 13 (4,13), (0,13) + self._instance_var = 3 + + def b(self): + #< (-4,13), (0,13) + self._instance_var + # A call to self used to trigger an error, because it's also a trailer + # with two children. + self() + + +class NestedClass(): + def __getattr__(self, name): + return self + +# Shouldn't find a definition, because there's other `instance`. +#< (0, 14), +NestedClass().instance + + +# ----------------- +# inheritance +# ----------------- +class Super(object): + #< 4 (0,4), (23,18), (25,13) + base_class = 1 + #< 4 (0,4), + class_var = 1 + + #< 8 (0,8), + def base_method(self): + #< 13 (0,13), (20,13) + self.base_var = 1 + #< 13 (0,13), + self.instance_var = 1 + + #< 8 (0,8), + def just_a_method(self): pass + + +#< 20 (0,16), (-18,6) +class TestClass(Super): + #< 4 (0,4), + class_var = 1 + + def x_method(self): + + #< (0,18), (2,13), (-23,4) + TestClass.base_class + #< (-2,18), (0,13), (-25,4) + self.base_class + #< (-20,13), (0,13) + self.base_var + #< (0, 18), + TestClass.base_var + + + #< 13 (5,13), (0,13) + self.instance_var = 3 + + #< 9 (0,8), + def just_a_method(self): + #< (-5,13), (0,13) + self.instance_var + + +# ----------------- +# properties +# ----------------- +class TestProperty: + + @property + #< 10 (0,8), (5,13) + def prop(self): + return 1 + + def a(self): + #< 13 (-5,8), (0,13) + self.prop + + @property + #< 13 (0,8), (4,5), (6,8), (11,13) + def rw_prop(self): + return self._rw_prop + + #< 8 
(-4,8), (0,5), (2,8), (7,13) + @rw_prop.setter + #< 8 (-6,8), (-2,5), (0,8), (5,13) + def rw_prop(self, value): + self._rw_prop = value + + def b(self): + #< 13 (-11,8), (-7,5), (-5,8), (0,13) + self.rw_prop + +# ----------------- +# *args, **kwargs +# ----------------- +#< 11 (1,11), (0,8) +def f(**kwargs): + return kwargs + + +# ----------------- +# No result +# ----------------- +if isinstance(j, int): + #< (0, 4), + j + +# ----------------- +# Dynamic Param Search +# ----------------- + +class DynamicParam(): + def foo(self): + return + +def check(instance): + #< 13 (-5,8), (0,13) + instance.foo() + +check(DynamicParam()) + +# ----------------- +# Compiled Objects +# ----------------- + +import _sre + +# TODO reenable this, it's currently not working, because of 2/3 +# inconsistencies in typeshed (_sre exists in typeshed/2, but not in +# typeshed/3). +##< 0 (-3,7), (0,0), ('_sre', None, None) +_sre + +# ----------------- +# on syntax +# ----------------- + +#< 0 +import undefined + +# ----------------- +# comprehensions +# ----------------- + +#< 0 (0,0), (2,12) +x = 32 +#< 12 (-2,0), (0,12) +[x for x in x] + +#< 0 (0,0), (2,1), (2,12) +y = 32 +#< 12 (-2,0), (0,1), (0,12) +[y for b in y] + + +#< 1 (0,1), (0,7) +[x for x in something] +#< 7 (0,1), (0,7) +[x for x in something] + +z = 3 +#< 1 (0,1), (0,10) +{z:1 for z in something} +#< 10 (0,1), (0,10) +{z:1 for z in something} + +#< 8 (0,6), (0, 40) +[[x + nested_loopv2 for x in bar()] for nested_loopv2 in baz()] + +#< 25 (0,20), (0, 65) +(("*" if abs(foo(x, nested_loopv1)) else " " for x in bar()) for nested_loopv1 in baz()) + + +def whatever_func(): + zzz = 3 + if UNDEFINED: + zzz = 5 + if UNDEFINED2: + #< (3, 8), (4, 4), (0, 12), (-3, 8), (-5, 4) + zzz + else: + #< (0, 8), (1, 4), (-3, 12), (-6, 8), (-8, 4) + zzz + zzz + +# ----------------- +# global +# ----------------- + +def global_usage1(): + #< (0, 4), (4, 11), (6, 4), (9, 8), (12, 4) + my_global + +def global_definition(): + #< (-4, 4), (0, 11), (2, 
4), (5, 8), (8, 4) + global my_global + #< 4 (-6, 4), (-2, 11), (0, 4), (3, 8), (6, 4) + my_global = 3 + if WHATEVER: + #< 8 (-9, 4), (-5, 11), (-3, 4), (0, 8), (3, 4) + my_global = 4 + +def global_usage2() + my_global + +def not_global(my_global): + my_global + +class DefinitelyNotGlobal: + def my_global(self): + def my_global(self): + pass + +# ----------------- +# stubs +# ----------------- + +from stub_folder import with_stub +#< ('stub:stub_folder.with_stub', 5, 4), ('stub_folder.with_stub', 5, 4), (0, 10) +with_stub.stub_function +from stub_folder.with_stub_folder.nested_stub_only import in_stub_only +#< ('stub:stub_folder.with_stub_folder.nested_stub_only', 2, 4), ('stub:stub_folder.with_stub_folder.nested_stub_only', 4, 4), ('stubs', 64, 17), (-2, 58), (0, 0) +in_stub_only +from stub_folder.with_stub_folder.nested_with_stub import in_python +#< ('stub_folder.with_stub_folder.nested_with_stub', 1, 0), ('stubs', 68, 17), (-2, 58), (0, 0) +in_python +from stub_folder.with_stub_folder.nested_with_stub import in_both +#< ('stub_folder.with_stub_folder.nested_with_stub', 2, 0), ('stub:stub_folder.with_stub_folder.nested_with_stub', 2, 0), ('stubs', 66, 17), (-2, 58), (0, 0) +in_both + +# ----------------- +# across directories +# ----------------- + +#< 8 (0, 0), (3, 4), ('import_tree.references', 1, 21), ('import_tree.references', 5, 4) +usage_definition = 1 +if False: + #< 8 (-3, 0), (0, 4), ('import_tree.references', 1, 21), ('import_tree.references', 5, 4) + usage_definition() + +# ----------------- +# stdlib stuff +# ----------------- + +import socket +#< (1, 21), (0, 7), ('socket', ..., 6), ('stub:socket', ..., 4), ('imports', ..., 7) +socket.SocketIO +some_socket = socket.SocketIO() diff --git a/bundle/jedi-vim/pythonx/jedi/test/conftest.py b/bundle/jedi-vim/pythonx/jedi/test/conftest.py new file mode 100644 index 000000000..057c4b623 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/conftest.py @@ -0,0 +1,170 @@ +import os +import sys +import 
subprocess + +import pytest + +from . import helpers +from . import run +from . import refactor +from jedi import InterpreterEnvironment, get_system_environment +from jedi.inference.compiled.value import create_from_access_path +from jedi.api.interpreter import MixedModuleContext + +# For interpreter tests sometimes the path of this directory is in the sys +# path, which we definitely don't want. So just remove it globally. +try: + sys.path.remove(helpers.test_dir) +except ValueError: + pass + + +def pytest_addoption(parser): + parser.addoption( + "--integration-case-dir", + default=os.path.join(helpers.test_dir, 'completion'), + help="Directory in which integration test case files locate.") + parser.addoption( + "--refactor-case-dir", + default=os.path.join(helpers.test_dir, 'refactor'), + help="Directory in which refactoring test case files locate.") + parser.addoption( + "--test-files", "-T", default=[], action='append', + help=( + "Specify test files using FILE_NAME[:LINE[,LINE[,...]]]. " + "For example: -T generators.py:10,13,19. " + "Note that you can use -m to specify the test case by id.")) + parser.addoption( + "--thirdparty", action='store_true', + help="Include integration tests that requires third party modules.") + + +def parse_test_files_option(opt): + """ + Parse option passed to --test-files into a key-value pair. 
+ + >>> parse_test_files_option('generators.py:10,13,19') + ('generators.py', [10, 13, 19]) + """ + opt = str(opt) + if ':' in opt: + (f_name, rest) = opt.split(':', 1) + return f_name, list(map(int, rest.split(','))) + else: + return opt, [] + + +def pytest_generate_tests(metafunc): + """ + :type metafunc: _pytest.python.Metafunc + """ + test_files = dict(map(parse_test_files_option, + metafunc.config.option.test_files)) + if 'case' in metafunc.fixturenames: + base_dir = metafunc.config.option.integration_case_dir + thirdparty = metafunc.config.option.thirdparty + cases = list(run.collect_dir_tests(base_dir, test_files)) + if thirdparty: + cases.extend(run.collect_dir_tests( + os.path.join(base_dir, 'thirdparty'), test_files, True)) + ids = ["%s:%s" % (c.module_name, c.line_nr_test) for c in cases] + metafunc.parametrize('case', cases, ids=ids) + + if 'refactor_case' in metafunc.fixturenames: + base_dir = metafunc.config.option.refactor_case_dir + cases = list(refactor.collect_dir_tests(base_dir, test_files)) + metafunc.parametrize( + 'refactor_case', cases, + ids=[c.refactor_type + '-' + c.name for c in cases] + ) + + if 'static_analysis_case' in metafunc.fixturenames: + base_dir = os.path.join(os.path.dirname(__file__), 'static_analysis') + cases = list(collect_static_analysis_tests(base_dir, test_files)) + metafunc.parametrize( + 'static_analysis_case', + cases, + ids=[c.name for c in cases] + ) + + +def collect_static_analysis_tests(base_dir, test_files): + for f_name in os.listdir(base_dir): + files_to_execute = [a for a in test_files.items() if a[0] in f_name] + if f_name.endswith(".py") and (not test_files or files_to_execute): + path = os.path.join(base_dir, f_name) + yield run.StaticAnalysisCase(path) + + +@pytest.fixture(scope='session') +def venv_path(tmpdir_factory, environment): + if isinstance(environment, InterpreterEnvironment): + # The environment can be a tox virtualenv environment which we don't + # want, so use the system environment. 
+ environment = get_system_environment( + '.'.join(str(x) for x in environment.version_info[:2]) + ) + + tmpdir = tmpdir_factory.mktemp('venv_path') + dirname = os.path.join(tmpdir.strpath, 'venv') + + # We cannot use the Python from tox because tox creates virtualenvs and + # they have different site.py files that work differently than the default + # ones. Instead, we find the real Python executable by printing the value + # of sys.base_prefix or sys.real_prefix if we are in a virtualenv. + output = subprocess.check_output([ + environment.executable, "-c", + "import sys; " + "print(sys.real_prefix if hasattr(sys, 'real_prefix') else sys.base_prefix)" + ]) + prefix = output.rstrip().decode('utf8') + if os.name == 'nt': + executable_path = os.path.join(prefix, 'python') + else: + executable_name = os.path.basename(environment.executable) + executable_path = os.path.join(prefix, 'bin', executable_name) + + return_code = subprocess.call([executable_path, '-m', 'venv', dirname]) + assert return_code == 0, return_code + return dirname + + +@pytest.fixture() +def cwd_tmpdir(monkeypatch, tmpdir): + with helpers.set_cwd(tmpdir.strpath): + yield tmpdir + + +@pytest.fixture +def inference_state(Script): + return Script('')._inference_state + + +@pytest.fixture +def same_process_inference_state(Script): + return Script('', environment=InterpreterEnvironment())._inference_state + + +@pytest.fixture +def disable_typeshed(monkeypatch): + from jedi.inference.gradual import typeshed + monkeypatch.setattr(typeshed, '_load_from_typeshed', lambda *args, **kwargs: None) + + +@pytest.fixture +def create_compiled_object(inference_state): + return lambda obj: create_from_access_path( + inference_state, + inference_state.compiled_subprocess.create_simple_object(obj) + ) + + +@pytest.fixture(params=[False, True]) +def class_findable(monkeypatch, request): + if not request.param: + monkeypatch.setattr( + MixedModuleContext, + '_get_mixed_object', + lambda self, compiled_object: 
compiled_object.as_context() + ) + return request.param diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/README.rst b/bundle/jedi-vim/pythonx/jedi/test/examples/README.rst new file mode 100644 index 000000000..ad8c5f76d --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/README.rst @@ -0,0 +1,5 @@ +Examples +======== + +Here you can find project structures that match other Python projects. This is +then used to check if jedi understands these structures. diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/buildout_project/bin/app b/bundle/jedi-vim/pythonx/jedi/test/examples/buildout_project/bin/app new file mode 100644 index 000000000..e8df4eb61 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/buildout_project/bin/app @@ -0,0 +1,12 @@ +#!/usr/bin/python + +import sys +sys.path[0:0] = [ + '/usr/lib/python3.8/site-packages', + '/tmp/.buildout/eggs/important_package.egg' +] + +import important_package + +if __name__ == '__main__': + sys.exit(important_package.main()) diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/buildout_project/bin/binary_file b/bundle/jedi-vim/pythonx/jedi/test/examples/buildout_project/bin/binary_file new file mode 100644 index 000000000..f1ad7558e --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/buildout_project/bin/binary_file @@ -0,0 +1 @@ +PNG diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/buildout_project/bin/empty_file b/bundle/jedi-vim/pythonx/jedi/test/examples/buildout_project/bin/empty_file new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/buildout_project/buildout.cfg b/bundle/jedi-vim/pythonx/jedi/test/examples/buildout_project/buildout.cfg new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/buildout_project/src/proj_name/module_name.py b/bundle/jedi-vim/pythonx/jedi/test/examples/buildout_project/src/proj_name/module_name.py new file mode 100644 index 
000000000..e69de29bb diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/django/app/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/examples/django/app/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/django/app/models.py b/bundle/jedi-vim/pythonx/jedi/test/examples/django/app/models.py new file mode 100644 index 000000000..7890e53cd --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/django/app/models.py @@ -0,0 +1 @@ +SomeModel = "bar" diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/django/manage.py b/bundle/jedi-vim/pythonx/jedi/test/examples/django/manage.py new file mode 100644 index 000000000..8940f07ed --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/django/manage.py @@ -0,0 +1,10 @@ +#!/usr/bin/env python +import os +import sys + +if __name__ == "__main__": + os.environ.setdefault("DJANGO_SETTINGS_MODULE", "foobar.settings") + + from django.core.management import execute_from_command_line + + execute_from_command_line(sys.argv) diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/flask-site-packages/flask/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/examples/flask-site-packages/flask/__init__.py new file mode 100644 index 000000000..e876bc153 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/flask-site-packages/flask/__init__.py @@ -0,0 +1 @@ + diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/flask-site-packages/flask/ext/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/examples/flask-site-packages/flask/ext/__init__.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/flask-site-packages/flask/ext/__init__.py @@ -0,0 +1 @@ + diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/flask-site-packages/flask_baz/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/examples/flask-site-packages/flask_baz/__init__.py new file mode 100644 index 000000000..e9b3fffe0 --- /dev/null 
+++ b/bundle/jedi-vim/pythonx/jedi/test/examples/flask-site-packages/flask_baz/__init__.py @@ -0,0 +1 @@ +Baz = 1 diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/flask-site-packages/flask_foo.py b/bundle/jedi-vim/pythonx/jedi/test/examples/flask-site-packages/flask_foo.py new file mode 100644 index 000000000..0b910b80d --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/flask-site-packages/flask_foo.py @@ -0,0 +1,2 @@ +class Foo(object): + pass diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/flask-site-packages/flaskext/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/examples/flask-site-packages/flaskext/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/flask-site-packages/flaskext/bar.py b/bundle/jedi-vim/pythonx/jedi/test/examples/flask-site-packages/flaskext/bar.py new file mode 100644 index 000000000..6629f9aec --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/flask-site-packages/flaskext/bar.py @@ -0,0 +1,2 @@ +class Bar(object): + pass diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/flask-site-packages/flaskext/moo/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/examples/flask-site-packages/flaskext/moo/__init__.py new file mode 100644 index 000000000..266e80937 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/flask-site-packages/flaskext/moo/__init__.py @@ -0,0 +1 @@ +Moo = 1 diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/implicit_namespace_package/ns1/pkg/ns1_file.py b/bundle/jedi-vim/pythonx/jedi/test/examples/implicit_namespace_package/ns1/pkg/ns1_file.py new file mode 100644 index 000000000..940279f9f --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/implicit_namespace_package/ns1/pkg/ns1_file.py @@ -0,0 +1 @@ +foo = 'ns1_file!' 
diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/implicit_namespace_package/ns2/pkg/ns2_file.py b/bundle/jedi-vim/pythonx/jedi/test/examples/implicit_namespace_package/ns2/pkg/ns2_file.py new file mode 100644 index 000000000..e87d7d886 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/implicit_namespace_package/ns2/pkg/ns2_file.py @@ -0,0 +1 @@ +foo = 'ns2_file!' diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/implicit_nested_namespaces/namespace/pkg/module.py b/bundle/jedi-vim/pythonx/jedi/test/examples/implicit_nested_namespaces/namespace/pkg/module.py new file mode 100644 index 000000000..3c3782053 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/implicit_nested_namespaces/namespace/pkg/module.py @@ -0,0 +1 @@ +CONST = 1 diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/import-recursion/cadquery_simple/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/examples/import-recursion/cadquery_simple/__init__.py new file mode 100644 index 000000000..e87c66328 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/import-recursion/cadquery_simple/__init__.py @@ -0,0 +1 @@ +from .cq import selectors diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/import-recursion/cadquery_simple/cq.py b/bundle/jedi-vim/pythonx/jedi/test/examples/import-recursion/cadquery_simple/cq.py new file mode 100644 index 000000000..90bb9ac75 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/import-recursion/cadquery_simple/cq.py @@ -0,0 +1 @@ +from . import selectors diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/import-recursion/cq_example.py b/bundle/jedi-vim/pythonx/jedi/test/examples/import-recursion/cq_example.py new file mode 100644 index 000000000..00a571762 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/import-recursion/cq_example.py @@ -0,0 +1,3 @@ +import cadquery_simple as cq + +cq. 
diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/inheritance/pkg/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/examples/inheritance/pkg/__init__.py new file mode 100644 index 000000000..668dd9b62 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/inheritance/pkg/__init__.py @@ -0,0 +1,6 @@ +from .module import Bar + + +class Foo(Bar): + def foo(self): + pass diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/inheritance/pkg/module.py b/bundle/jedi-vim/pythonx/jedi/test/examples/inheritance/pkg/module.py new file mode 100644 index 000000000..9da99ca16 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/inheritance/pkg/module.py @@ -0,0 +1,4 @@ + +class Bar: + def bar(self): + pass diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/init_extension_module/__init__.cpython-38-x86_64-linux-gnu.so b/bundle/jedi-vim/pythonx/jedi/test/examples/init_extension_module/__init__.cpython-38-x86_64-linux-gnu.so new file mode 100644 index 000000000..199578410 Binary files /dev/null and b/bundle/jedi-vim/pythonx/jedi/test/examples/init_extension_module/__init__.cpython-38-x86_64-linux-gnu.so differ diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/init_extension_module/module.c b/bundle/jedi-vim/pythonx/jedi/test/examples/init_extension_module/module.c new file mode 100644 index 000000000..bfa06f6b3 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/init_extension_module/module.c @@ -0,0 +1,15 @@ +#include "Python.h" + +static struct PyModuleDef module = { + PyModuleDef_HEAD_INIT, + "init_extension_module", + NULL, + -1, + NULL +}; + +PyMODINIT_FUNC PyInit_init_extension_module(void){ + PyObject *m = PyModule_Create(&module); + PyModule_AddObject(m, "foo", Py_None); + return m; +} diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/init_extension_module/setup.py b/bundle/jedi-vim/pythonx/jedi/test/examples/init_extension_module/setup.py new file mode 100644 index 000000000..5ce051774 --- /dev/null +++ 
b/bundle/jedi-vim/pythonx/jedi/test/examples/init_extension_module/setup.py @@ -0,0 +1,10 @@ +from distutils.core import setup, Extension + +setup(name='init_extension_module', + version='0.0', + description='', + ext_modules=[ + Extension('init_extension_module.__init__', + sources=['module.c']) + ] +) diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/issue1209/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/examples/issue1209/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/issue1209/api/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/examples/issue1209/api/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/issue1209/api/whatever/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/examples/issue1209/api/whatever/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/issue1209/api/whatever/api_test1.py b/bundle/jedi-vim/pythonx/jedi/test/examples/issue1209/api/whatever/api_test1.py new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/issue1209/whatever/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/examples/issue1209/whatever/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/issue1209/whatever/test.py b/bundle/jedi-vim/pythonx/jedi/test/examples/issue1209/whatever/test.py new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/namespace_package/ns1/pkg/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/examples/namespace_package/ns1/pkg/__init__.py new file mode 100644 index 000000000..bc10ee24e --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/namespace_package/ns1/pkg/__init__.py @@ -0,0 +1,9 @@ +foo = 'ns1!' 
+ +# this is a namespace package +try: + import pkg_resources + pkg_resources.declare_namespace(__name__) +except ImportError: + import pkgutil + __path__ = pkgutil.extend_path(__path__, __name__) diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/namespace_package/ns1/pkg/ns1_file.py b/bundle/jedi-vim/pythonx/jedi/test/examples/namespace_package/ns1/pkg/ns1_file.py new file mode 100644 index 000000000..940279f9f --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/namespace_package/ns1/pkg/ns1_file.py @@ -0,0 +1 @@ +foo = 'ns1_file!' diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/namespace_package/ns1/pkg/ns1_folder/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/examples/namespace_package/ns1/pkg/ns1_folder/__init__.py new file mode 100644 index 000000000..9eeeb294d --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/namespace_package/ns1/pkg/ns1_folder/__init__.py @@ -0,0 +1 @@ +foo = 'ns1_folder!' diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/namespace_package/ns2/pkg/ns2_file.py b/bundle/jedi-vim/pythonx/jedi/test/examples/namespace_package/ns2/pkg/ns2_file.py new file mode 100644 index 000000000..e87d7d886 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/namespace_package/ns2/pkg/ns2_file.py @@ -0,0 +1 @@ +foo = 'ns2_file!' diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/namespace_package/ns2/pkg/ns2_folder/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/examples/namespace_package/ns2/pkg/ns2_folder/__init__.py new file mode 100644 index 000000000..70b24ae4f --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/namespace_package/ns2/pkg/ns2_folder/__init__.py @@ -0,0 +1 @@ +foo = 'ns2_folder!' 
diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/namespace_package/ns2/pkg/ns2_folder/nested/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/examples/namespace_package/ns2/pkg/ns2_folder/nested/__init__.py new file mode 100644 index 000000000..fbba1db13 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/namespace_package/ns2/pkg/ns2_folder/nested/__init__.py @@ -0,0 +1 @@ +foo = 'nested!' diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/namespace_package_relative_import/rel1.py b/bundle/jedi-vim/pythonx/jedi/test/examples/namespace_package_relative_import/rel1.py new file mode 100644 index 000000000..79b35a589 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/namespace_package_relative_import/rel1.py @@ -0,0 +1 @@ +from .rel2 import name diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/namespace_package_relative_import/rel2.py b/bundle/jedi-vim/pythonx/jedi/test/examples/namespace_package_relative_import/rel2.py new file mode 100644 index 000000000..14a0ee4af --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/namespace_package_relative_import/rel2.py @@ -0,0 +1 @@ +name = 1 diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/nested_namespaces/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/examples/nested_namespaces/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/nested_namespaces/namespace/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/examples/nested_namespaces/namespace/__init__.py new file mode 100644 index 000000000..42e33a76c --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/nested_namespaces/namespace/__init__.py @@ -0,0 +1,4 @@ +try: + __import__('pkg_resources').declare_namespace(__name__) +except ImportError: + pass diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/nested_namespaces/namespace/pkg/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/examples/nested_namespaces/namespace/pkg/__init__.py new file 
mode 100644 index 000000000..3c3782053 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/nested_namespaces/namespace/pkg/__init__.py @@ -0,0 +1 @@ +CONST = 1 diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/not_in_sys_path/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/examples/not_in_sys_path/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/not_in_sys_path/not_in_sys_path.py b/bundle/jedi-vim/pythonx/jedi/test/examples/not_in_sys_path/not_in_sys_path.py new file mode 100644 index 000000000..8943d8df2 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/not_in_sys_path/not_in_sys_path.py @@ -0,0 +1 @@ +value = 3 diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/not_in_sys_path/not_in_sys_path_package/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/examples/not_in_sys_path/not_in_sys_path_package/__init__.py new file mode 100644 index 000000000..5ef1a7490 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/not_in_sys_path/not_in_sys_path_package/__init__.py @@ -0,0 +1 @@ +value = 'package' diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/not_in_sys_path/not_in_sys_path_package/module.py b/bundle/jedi-vim/pythonx/jedi/test/examples/not_in_sys_path/not_in_sys_path_package/module.py new file mode 100644 index 000000000..364dd0350 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/not_in_sys_path/not_in_sys_path_package/module.py @@ -0,0 +1 @@ +value = 'package.module' diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/not_in_sys_path/pkg/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/examples/not_in_sys_path/pkg/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/not_in_sys_path/pkg/module.py b/bundle/jedi-vim/pythonx/jedi/test/examples/not_in_sys_path/pkg/module.py new file mode 100644 index 000000000..53c44ca1b --- /dev/null +++ 
b/bundle/jedi-vim/pythonx/jedi/test/examples/not_in_sys_path/pkg/module.py @@ -0,0 +1,7 @@ +from not_in_sys_path import not_in_sys_path +from not_in_sys_path import not_in_sys_path_package +from not_in_sys_path.not_in_sys_path_package import module + +not_in_sys_path.value +not_in_sys_path_package.value +module.value diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/sample_venvs/pth_directory/dir-from-foo-pth/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/examples/sample_venvs/pth_directory/dir-from-foo-pth/__init__.py new file mode 100644 index 000000000..2a1d87006 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/sample_venvs/pth_directory/dir-from-foo-pth/__init__.py @@ -0,0 +1,2 @@ +# This file is here to force git to create the directory, as *.pth files only +# add existing directories. diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/sample_venvs/pth_directory/egg_link.egg-link b/bundle/jedi-vim/pythonx/jedi/test/examples/sample_venvs/pth_directory/egg_link.egg-link new file mode 100644 index 000000000..dde9b7d5f --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/sample_venvs/pth_directory/egg_link.egg-link @@ -0,0 +1 @@ +/path/from/egg-link diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/sample_venvs/pth_directory/foo.pth b/bundle/jedi-vim/pythonx/jedi/test/examples/sample_venvs/pth_directory/foo.pth new file mode 100644 index 000000000..885016822 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/sample_venvs/pth_directory/foo.pth @@ -0,0 +1 @@ +./dir-from-foo-pth diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/sample_venvs/pth_directory/import_smth.pth b/bundle/jedi-vim/pythonx/jedi/test/examples/sample_venvs/pth_directory/import_smth.pth new file mode 100644 index 000000000..0a978a8f7 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/sample_venvs/pth_directory/import_smth.pth @@ -0,0 +1 @@ +import smth; smth.extend_path_foo() diff --git 
a/bundle/jedi-vim/pythonx/jedi/test/examples/sample_venvs/pth_directory/relative.egg-link b/bundle/jedi-vim/pythonx/jedi/test/examples/sample_venvs/pth_directory/relative.egg-link new file mode 100644 index 000000000..7a9a6156b --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/sample_venvs/pth_directory/relative.egg-link @@ -0,0 +1 @@ +./relative/egg-link/path diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/sample_venvs/pth_directory/smth.py b/bundle/jedi-vim/pythonx/jedi/test/examples/sample_venvs/pth_directory/smth.py new file mode 100644 index 000000000..6d1eefe35 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/sample_venvs/pth_directory/smth.py @@ -0,0 +1,6 @@ +import sys +sys.path.append('/foo/smth.py:module') + + +def extend_path_foo(): + sys.path.append('/foo/smth.py:from_func') diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/simple_import/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/examples/simple_import/__init__.py new file mode 100644 index 000000000..3a03a829f --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/simple_import/__init__.py @@ -0,0 +1,5 @@ +from simple_import import module + + +def in_function(): + from simple_import import module2 diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/simple_import/module.py b/bundle/jedi-vim/pythonx/jedi/test/examples/simple_import/module.py new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/simple_import/module2.py b/bundle/jedi-vim/pythonx/jedi/test/examples/simple_import/module2.py new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/speed/precedence.py b/bundle/jedi-vim/pythonx/jedi/test/examples/speed/precedence.py new file mode 100644 index 000000000..afd8824e0 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/speed/precedence.py @@ -0,0 +1,37 @@ +def marks(code): + if '.' 
in code: + another(code[:code.index(',') - 1] + '!') + else: + another(code + '.') + + +def another(code2): + call(numbers(code2 + 'haha')) + +marks('start1 ') +marks('start2 ') + + +def alphabet(code4): + if 1: + if 2: + return code4 + 'a' + else: + return code4 + 'b' + else: + if 2: + return code4 + 'c' + else: + return code4 + 'd' + + +def numbers(code5): + if 2: + return alphabet(code5 + '1') + else: + return alphabet(code5 + '2') + + +def call(code3): + code3 = numbers(numbers('end')) + numbers(code3) + code3.partition diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/stub_packages/no_python-stubs/__init__.pyi b/bundle/jedi-vim/pythonx/jedi/test/examples/stub_packages/no_python-stubs/__init__.pyi new file mode 100644 index 000000000..18770a107 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/stub_packages/no_python-stubs/__init__.pyi @@ -0,0 +1 @@ +foo: int diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/stub_packages/with_python-stubs/__init__.pyi b/bundle/jedi-vim/pythonx/jedi/test/examples/stub_packages/with_python-stubs/__init__.pyi new file mode 100644 index 000000000..d3e7ff030 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/stub_packages/with_python-stubs/__init__.pyi @@ -0,0 +1,6 @@ +from . 
import module + +func_with_stub = module.func_with_stub + +both: int +stub_only: str diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/stub_packages/with_python-stubs/module.pyi b/bundle/jedi-vim/pythonx/jedi/test/examples/stub_packages/with_python-stubs/module.pyi new file mode 100644 index 000000000..1a0c0453e --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/stub_packages/with_python-stubs/module.pyi @@ -0,0 +1,5 @@ +in_sub_module: int + + +def func_with_stub(b: int) -> float: + pass diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/stub_packages/with_python/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/examples/stub_packages/with_python/__init__.py new file mode 100644 index 000000000..a17d2e623 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/stub_packages/with_python/__init__.py @@ -0,0 +1,7 @@ +from with_python import module as _module + +func_without_stub = _module.func_without_stub + + +python_only = 1 +both = '' diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/stub_packages/with_python/module.py b/bundle/jedi-vim/pythonx/jedi/test/examples/stub_packages/with_python/module.py new file mode 100644 index 000000000..8058f05cc --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/stub_packages/with_python/module.py @@ -0,0 +1,9 @@ +def func_without_stub(a): + 'nostubdoc' + + +def func_with_stub(c): + 'withstubdoc' + + +in_sub_module = '' diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/typing_overload/file.py b/bundle/jedi-vim/pythonx/jedi/test/examples/typing_overload/file.py new file mode 100644 index 000000000..19f3149db --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/examples/typing_overload/file.py @@ -0,0 +1,2 @@ +def with_overload(x, y: int) -> list: + pass diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/typing_overload/file.pyi b/bundle/jedi-vim/pythonx/jedi/test/examples/typing_overload/file.pyi new file mode 100644 index 000000000..1f31b5434 --- /dev/null +++ 
b/bundle/jedi-vim/pythonx/jedi/test/examples/typing_overload/file.pyi @@ -0,0 +1,8 @@ +from typing import overload + + +@overload +def with_overload(x: int, y: int) -> float: ... + +@overload +def with_overload(x: str, y: list) -> float: ... diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/zipped_imports/not_pkg.zip b/bundle/jedi-vim/pythonx/jedi/test/examples/zipped_imports/not_pkg.zip new file mode 100644 index 000000000..f1516a6aa Binary files /dev/null and b/bundle/jedi-vim/pythonx/jedi/test/examples/zipped_imports/not_pkg.zip differ diff --git a/bundle/jedi-vim/pythonx/jedi/test/examples/zipped_imports/pkg.zip b/bundle/jedi-vim/pythonx/jedi/test/examples/zipped_imports/pkg.zip new file mode 100644 index 000000000..0344f7469 Binary files /dev/null and b/bundle/jedi-vim/pythonx/jedi/test/examples/zipped_imports/pkg.zip differ diff --git a/bundle/jedi-vim/pythonx/jedi/test/helpers.py b/bundle/jedi-vim/pythonx/jedi/test/helpers.py new file mode 100644 index 000000000..b11f4941a --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/helpers.py @@ -0,0 +1,56 @@ +""" +A helper module for testing, improves compatibility for testing (as +``jedi._compatibility``) as well as introducing helper functions. +""" + +from contextlib import contextmanager + +import os +import pytest +from functools import partial, wraps +from jedi import Project +from pathlib import Path + +test_dir = Path(__file__).absolute().parent +test_dir_project = Project(test_dir) +root_dir = test_dir.parent +example_dir = test_dir.joinpath('examples') + +sample_int = 1 # This is used in completion/imports.py + +skip_if_windows = partial(pytest.param, + marks=pytest.mark.skipif("sys.platform=='win32'")) +skip_if_not_windows = partial(pytest.param, + marks=pytest.mark.skipif("sys.platform!='win32'")) + + +def get_example_dir(*names): + return example_dir.joinpath(*names) + + +def cwd_at(path): + """ + Decorator to run function at `path`. 
+ + :type path: str + :arg path: relative path from repository root (e.g., ``'jedi'``). + """ + def decorator(func): + @wraps(func) + def wrapper(Script, **kwargs): + with set_cwd(path): + return func(Script, **kwargs) + return wrapper + return decorator + + +@contextmanager +def set_cwd(path, absolute_path=False): + repo_root = test_dir.parent + + oldcwd = Path.cwd() + os.chdir(repo_root.joinpath(path)) + try: + yield + finally: + os.chdir(oldcwd) diff --git a/bundle/jedi-vim/pythonx/jedi/test/refactor.py b/bundle/jedi-vim/pythonx/jedi/test/refactor.py new file mode 100644 index 000000000..7598bc7de --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/refactor.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python +""" +Refactoring tests work a little bit similar to integration tests. But the idea +is here to compare two versions of code. If you want to add a new test case, +just look at the existing ones in the ``test/refactor`` folder and copy them. +""" +import os +import platform +import re + +from parso import split_lines + +from functools import reduce +import jedi +from .helpers import test_dir + + +class RefactoringCase(object): + + def __init__(self, name, code, line_nr, index, path, kwargs, type_, desired_result): + self.name = name + self._code = code + self._line_nr = line_nr + self._index = index + self._path = path + self._kwargs = kwargs + self.type = type_ + self._desired_result = desired_result + + def get_desired_result(self): + + if platform.system().lower() == 'windows' and self.type == 'diff': + # Windows uses backslashes to separate paths. 
+ lines = split_lines(self._desired_result, keepends=True) + for i, line in enumerate(lines): + if re.search(' import_tree/', line): + lines[i] = line.replace('/', '\\') + return ''.join(lines) + return self._desired_result + + @property + def refactor_type(self): + f_name = os.path.basename(self._path) + return f_name.replace('.py', '') + + def refactor(self, environment): + project = jedi.Project(os.path.join(test_dir, 'refactor')) + script = jedi.Script(self._code, path=self._path, project=project, environment=environment) + refactor_func = getattr(script, self.refactor_type) + return refactor_func(self._line_nr, self._index, **self._kwargs) + + def __repr__(self): + return '<%s: %s:%s>' % (self.__class__.__name__, + self.name, self._line_nr - 1) + + +def _collect_file_tests(code, path, lines_to_execute): + r = r'^# -{5,} ?([^\n]*)\n((?:(?!\n# \+{5,}).)*\n)' \ + r'# \+{5,}\n((?:(?!\n# -{5,}).)*\n)' + match = None + for match in re.finditer(r, code, re.DOTALL | re.MULTILINE): + name = match.group(1).strip() + first = match.group(2) + second = match.group(3) + + # get the line with the position of the operation + p = re.match(r'((?:(?!#\?).)*)#\? 
(\d*)( error| text|) ?([^\n]*)', first, re.DOTALL) + if p is None: + raise Exception("Please add a test start.") + continue + until = p.group(1) + index = int(p.group(2)) + type_ = p.group(3).strip() or 'diff' + if p.group(4): + kwargs = eval(p.group(4)) + else: + kwargs = {} + + line_nr = until.count('\n') + 2 + if lines_to_execute and line_nr - 1 not in lines_to_execute: + continue + + yield RefactoringCase(name, first, line_nr, index, path, kwargs, type_, second) + if match is None: + raise Exception(f"Didn't match any test for {path}, {code!r}") + if match.end() != len(code): + raise Exception(f"Didn't match until the end of the file in {path}") + + +def collect_dir_tests(base_dir, test_files): + for f_name in os.listdir(base_dir): + files_to_execute = [a for a in test_files.items() if a[0] in f_name] + lines_to_execute = reduce(lambda x, y: x + y[1], files_to_execute, []) + if f_name.endswith(".py") and (not test_files or files_to_execute): + path = os.path.join(base_dir, f_name) + with open(path, newline='') as f: + code = f.read() + for case in _collect_file_tests(code, path, lines_to_execute): + yield case diff --git a/bundle/jedi-vim/pythonx/jedi/test/refactor/extract_function.py b/bundle/jedi-vim/pythonx/jedi/test/refactor/extract_function.py new file mode 100644 index 000000000..da2fd2592 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/refactor/extract_function.py @@ -0,0 +1,463 @@ +# -------------------------------------------------- in-module-0 +global_var = 3 +def x(): + foo = 3.1 + #? 11 text {'new_name': 'bar'} + x = int(foo + 1 + global_var) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +global_var = 3 +def bar(foo): + return int(foo + 1 + global_var) + + +def x(): + foo = 3.1 + #? 11 text {'new_name': 'bar'} + x = bar(foo) +# -------------------------------------------------- in-module-1 +glob = 3 +#? 11 text {'new_name': 'a'} +test(100, (glob.a + b, c) + 1) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +glob = 3 +#? 
11 text {'new_name': 'a'} +def a(b): + return glob.a + b + + +test(100, (a(b), c) + 1) +# -------------------------------------------------- in-module-2 +#? 0 text {'new_name': 'ab'} +100 + 1 * 2 +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +#? 0 text {'new_name': 'ab'} +def ab(): + return 100 + 1 * 2 + + +ab() +# -------------------------------------------------- in-function-1 +def f(x): +#? 11 text {'new_name': 'ab'} + return x + 1 * 2 +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +def ab(x): + return x + 1 * 2 + + +def f(x): +#? 11 text {'new_name': 'ab'} + return ab(x) +# -------------------------------------------------- in-function-with-dec +@classmethod +def f(x): +#? 11 text {'new_name': 'ab'} + return x + 1 * 2 +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +def ab(x): + return x + 1 * 2 + + +@classmethod +def f(x): +#? 11 text {'new_name': 'ab'} + return ab(x) +# -------------------------------------------------- in-method-1 +class X: + def z(self): pass + + def f(x, b): + #? 11 text {'new_name': 'ab'} + return x + b * 2 +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +class X: + def z(self): pass + + def ab(x, b): + return x + b * 2 + + def f(x, b): + #? 11 text {'new_name': 'ab'} + return x.ab(b) +# -------------------------------------------------- in-method-2 +glob1 = 1 +class X: + def g(self): pass + + def f(self, b, c): + #? 11 text {'new_name': 'ab'} + return self.g() or self.f(b) ^ glob1 & b +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +glob1 = 1 +class X: + def g(self): pass + + def ab(self, b): + return self.g() or self.f(b) ^ glob1 & b + + def f(self, b, c): + #? 11 text {'new_name': 'ab'} + return self.ab(b) +# -------------------------------------------------- in-method-order +class X: + def f(self, b, c): + #? 18 text {'new_name': 'b'} + return b | self.a +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +class X: + def b(self, b): + return b | self.a + + def f(self, b, c): + #? 
18 text {'new_name': 'b'} + return self.b(b) +# -------------------------------------------------- in-classmethod-1 +class X: + @classmethod + def f(x): + #? 16 text {'new_name': 'ab'} + return 25 +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +class X: + @classmethod + def ab(x): + return 25 + + @classmethod + def f(x): + #? 16 text {'new_name': 'ab'} + return x.ab() +# -------------------------------------------------- in-staticmethod-1 +class X(int): + @staticmethod + def f(x): + #? 16 text {'new_name': 'ab'} + return 25 | 3 +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +def ab(): + return 25 | 3 + +class X(int): + @staticmethod + def f(x): + #? 16 text {'new_name': 'ab'} + return ab() +# -------------------------------------------------- in-class-1 +class Ya(): + a = 3 + #? 11 text {'new_name': 'f'} + c = a + 2 +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +def f(a): + return a + 2 + + +class Ya(): + a = 3 + #? 11 text {'new_name': 'f'} + c = f(a) +# -------------------------------------------------- in-closure +def x(z): + def y(x): + #? 15 text {'new_name': 'f'} + return -x * z +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +def f(x, z): + return -x * z + + +def x(z): + def y(x): + #? 15 text {'new_name': 'f'} + return f(x, z) +# -------------------------------------------------- with-range-1 +#? 0 text {'new_name': 'a', 'until_line': 4} +v1 = 3 +v2 = 2 +x = test(v1 + v2 * v3) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +#? 0 text {'new_name': 'a', 'until_line': 4} +def a(test, v3): + v1 = 3 + v2 = 2 + x = test(v1 + v2 * v3) + return x + + +x = a(test, v3) +# -------------------------------------------------- with-range-2 +#? 2 text {'new_name': 'a', 'until_line': 6, 'until_column': 4} +#foo +v1 = 3 +v2 = 2 +x, y = test(v1 + v2 * v3) +#raaaa +y +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +#? 
2 text {'new_name': 'a', 'until_line': 6, 'until_column': 4} +def a(test, v3): + #foo + v1 = 3 + v2 = 2 + x, y = test(v1 + v2 * v3) + #raaaa + return y + + +y = a(test, v3) +y +# -------------------------------------------------- with-range-3 +#foo +#? 2 text {'new_name': 'a', 'until_line': 5, 'until_column': 4} +v1 = 3 +v2 = 2 +x, y = test(v1 + v2 * v3) +#raaaa +y +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +#foo +#? 2 text {'new_name': 'a', 'until_line': 5, 'until_column': 4} +def a(test, v3): + v1 = 3 + v2 = 2 + x, y = test(v1 + v2 * v3) + return y + + +y = a(test, v3) +#raaaa +y +# -------------------------------------------------- with-range-func-1 +import os +# comment1 +@dec +# comment2 +def x(v1): + #foo + #? 2 text {'new_name': 'a', 'until_line': 9, 'until_column': 5} + v2 = 2 + if 1: + x, y = os.listdir(v1 + v2 * v3) + #bar + return x, y +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +import os +# comment1 +def a(v1, v3): + v2 = 2 + if 1: + x, y = os.listdir(v1 + v2 * v3) + return x, y + + +@dec +# comment2 +def x(v1): + #foo + #? 2 text {'new_name': 'a', 'until_line': 9, 'until_column': 5} + x, y = a(v1, v3) + #bar + return x, y +# -------------------------------------------------- with-range-func-2 +import os +# comment1 +# comment2 +def x(v1): + #? 2 text {'new_name': 'a', 'until_line': 10, 'until_column': 0} + #foo + v2 = 2 + if 1: + x, y = os.listdir(v1 + v2 * v3) + #bar + return y +x +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +import os +# comment1 +# comment2 +def a(v1, v3): + #foo + v2 = 2 + if 1: + x, y = os.listdir(v1 + v2 * v3) + #bar + return y + + +def x(v1): + #? 2 text {'new_name': 'a', 'until_line': 10, 'until_column': 0} + y = a(v1, v3) + return y +x +# -------------------------------------------------- with-range-func-3 +def x(v1): + #? 
2 text {'new_name': 'func', 'until_line': 6, 'until_column': 4} + #foo + v2 = 2 + x = v1 * 2 + y = 3 + #bar + return x +x +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +def func(v1): + #foo + v2 = 2 + x = v1 * 2 + return x + + +def x(v1): + #? 2 text {'new_name': 'func', 'until_line': 6, 'until_column': 4} + x = func(v1) + y = 3 + #bar + return x +x +# -------------------------------------------------- in-class-range-1 +class X1: + #? 9 text {'new_name': 'f', 'until_line': 4} + a = 3 + c = a + 2 +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +def f(): + a = 3 + c = a + 2 + return c + + +class X1: + #? 9 text {'new_name': 'f', 'until_line': 4} + c = f() +# -------------------------------------------------- in-method-range-1 +glob1 = 1 +class X: + # ha + def g(self): pass + + # haha + def f(self, b, c): + #? 11 text {'new_name': 'ab', 'until_line': 12, 'until_column': 28} + #foo + local1 = 3 + local2 = 4 + x= self.g() or self.f(b) ^ glob1 & b is local1 + # bar +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +glob1 = 1 +class X: + # ha + def g(self): pass + + # haha + def ab(self, b): + #foo + local1 = 3 + local2 = 4 + x= self.g() or self.f(b) ^ glob1 & b is local1 + return x + + def f(self, b, c): + #? 11 text {'new_name': 'ab', 'until_line': 12, 'until_column': 28} + x = self.ab(b) + # bar +# -------------------------------------------------- in-method-range-2 +glob1 = 1 +class X: + # comment + + def f(self, b, c): + #? 11 text {'new_name': 'ab', 'until_line': 11, 'until_column': 10} + #foo + local1 = 3 + local2 = 4 + return local1 * glob1 * b + # bar +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +glob1 = 1 +class X: + # comment + + def ab(self, b): + #foo + local1 = 3 + local2 = 4 + return local1 * glob1 * b + # bar + + def f(self, b, c): + #? 
11 text {'new_name': 'ab', 'until_line': 11, 'until_column': 10} + return self.ab(b) +# -------------------------------------------------- in-method-range-3 +glob1 = 1 +class X: + def f(self, b, c): + local1, local2 = 3, 4 + #foo + #? 11 text {'new_name': 'ab', 'until_line': 7, 'until_column': 29} + return local1 & glob1 & b + # bar + local2 +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +glob1 = 1 +class X: + def ab(self, local1, b): + return local1 & glob1 & b + + def f(self, b, c): + local1, local2 = 3, 4 + #foo + #? 11 text {'new_name': 'ab', 'until_line': 7, 'until_column': 29} + return self.ab(local1, b) + # bar + local2 +# -------------------------------------------------- in-method-no-param +glob1 = 1 +class X: + def f(): + #? 11 text {'new_name': 'ab', 'until_line': 5, 'until_column': 22} + return glob1 + 2 +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +glob1 = 1 +class X: + def ab(): + return glob1 + 2 + + def f(): + #? 11 text {'new_name': 'ab', 'until_line': 5, 'until_column': 22} + return ab() +# -------------------------------------------------- random-return-1 +def x(): + #? 0 error {'new_name': 'ab', 'until_line': 5, 'until_column': 10} + if x: + return 1 + return 1 +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +Can only extract return statements if they are at the end. +# -------------------------------------------------- random-return-2 +def x(): + #? 0 error {'new_name': 'ab', 'until_line': 5, 'until_column': 10} + # + return + pass +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +Can only extract return statements if they are at the end. +# -------------------------------------------------- random-yield-1 +def x(): + #? 0 error {'new_name': 'ab', 'until_line': 5, 'until_column': 10} + # + if (yield 1): + return + pass +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +Cannot extract yield statements. +# -------------------------------------------------- random-yield-2 +def x(): + #? 
0 error {'new_name': 'ab', 'until_line': 4, 'until_column': 10} + # + try: + yield + finally: + pass +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +Cannot extract yield statements. diff --git a/bundle/jedi-vim/pythonx/jedi/test/refactor/extract_variable.py b/bundle/jedi-vim/pythonx/jedi/test/refactor/extract_variable.py new file mode 100644 index 000000000..880b528c0 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/refactor/extract_variable.py @@ -0,0 +1,260 @@ +# -------------------------------------------------- simple-1 +def test(): + #? 35 text {'new_name': 'a'} + return test(100, (30 + b, c) + 1) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +def test(): + #? 35 text {'new_name': 'a'} + a = (30 + b, c) + 1 + return test(100, a) +# -------------------------------------------------- simple-2 +def test(): + #? 25 text {'new_name': 'a'} + return test(100, (30 + b, c) + 1) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +def test(): + #? 25 text {'new_name': 'a'} + a = 30 + b + return test(100, (a, c) + 1) +# -------------------------------------------------- simple-3 +foo = 3.1 +#? 8 text {'new_name': 'bar'} +x = int(foo + 1) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +foo = 3.1 +#? 8 text {'new_name': 'bar'} +bar = foo + 1 +x = int(bar) +# -------------------------------------------------- simple-4 +#? 13 text {'new_name': 'zzx.x'} +test(100, {1 |1: 2 + 3}) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +#? 13 text {'new_name': 'zzx.x'} +zzx.x = 1 |1 +test(100, {zzx.x: 2 + 3}) +# -------------------------------------------------- multiline-1 +def test(): + #? 30 text {'new_name': 'x'} + return test(1, (30 + b, c) + + 1) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +def test(): + #? 30 text {'new_name': 'x'} + x = (30 + b, c) + + 1 + return test(1, x) +# -------------------------------------------------- multiline-2 +def test(): + #? 
25 text {'new_name': 'x'} + return test(1, (30 + b, c) + + 1) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +def test(): + #? 25 text {'new_name': 'x'} + x = 30 + b + return test(1, (x, c) + + 1) +# -------------------------------------------------- for-param-error-1 +#? 10 error {'new_name': 'x'} +def test(p1): + return +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +Cannot extract a name that defines something +# -------------------------------------------------- for-param-error-2 +#? 12 error {'new_name': 'x'} +def test(p1= 3): + return +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +Cannot extract a "param" +# -------------------------------------------------- for-param-1 +#? 12 text {'new_name': 'x'} +def test(p1=20): + return +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +#? 12 text {'new_name': 'x'} +x = 20 +def test(p1=x): + return +# -------------------------------------------------- for-something +#? 12 text {'new_name': 'x'} +def test(p1=20): + return +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +#? 12 text {'new_name': 'x'} +x = 20 +def test(p1=x): + return +# -------------------------------------------------- class-inheritance-1 +#? 12 text {'new_name': 'x'} +class Foo(foo.Bar): + pass +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +#? 12 text {'new_name': 'x'} +x = foo.Bar +class Foo(x): + pass +# -------------------------------------------------- class-inheritance-2 +#? 16 text {'new_name': 'x'} +class Foo(foo.Bar): + pass +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +#? 16 text {'new_name': 'x'} +x = foo.Bar +class Foo(x): + pass +# -------------------------------------------------- keyword-pass +#? 12 error {'new_name': 'x'} +def x(): pass +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +Cannot extract a "simple_stmt" +# -------------------------------------------------- keyword-continue +#? 
5 error {'new_name': 'x'} +continue +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +Cannot extract a "simple_stmt" +# -------------------------------------------------- keyword-None +if 1: + #? 4 text {'new_name': 'x'} + None +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +if 1: + #? 4 text {'new_name': 'x'} + x = None + x +# -------------------------------------------------- with-tuple +#? 4 text {'new_name': 'x'} +x + 1, 3 +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +#? 4 text {'new_name': 'x'} +x = x + 1 +x, 3 +# -------------------------------------------------- range-1 +#? 4 text {'new_name': 'x', 'until_column': 9} +y + 1, 3 +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +#? 4 text {'new_name': 'x', 'until_column': 9} +x = y + 1, 3 +x +# -------------------------------------------------- range-2 +#? 1 text {'new_name': 'x', 'until_column': 3} +y + 1, 3 +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +#? 1 text {'new_name': 'x', 'until_column': 3} +x = y + 1 +x, 3 +# -------------------------------------------------- range-3 +#? 1 text {'new_name': 'x', 'until_column': 6} +y + 1, 3 +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +#? 1 text {'new_name': 'x', 'until_column': 6} +x = y + 1 +x, 3 +# -------------------------------------------------- range-4 +#? 1 text {'new_name': 'x', 'until_column': 1} +y + 1, 3 +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +#? 1 text {'new_name': 'x', 'until_column': 1} +x = y +x + 1, 3 +# -------------------------------------------------- addition-1 +#? 4 text {'new_name': 'x', 'until_column': 9} +z = y + 1 + 2+ 3, 3 +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +#? 4 text {'new_name': 'x', 'until_column': 9} +x = y + 1 +z = x + 2+ 3, 3 +# -------------------------------------------------- addition-2 +#? 8 text {'new_name': 'x', 'until_column': 12} +z = y +1 + 2+ 3, 3 +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +#? 
8 text {'new_name': 'x', 'until_column': 12} +x = 1 + 2 +z = y +x+ 3, 3 +# -------------------------------------------------- addition-3 +#? 10 text {'new_name': 'x', 'until_column': 14} +z = y + 1 + 2+ 3, 3 +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +#? 10 text {'new_name': 'x', 'until_column': 14} +x = 1 + 2+ 3 +z = y + x, 3 +# -------------------------------------------------- addition-4 +#? 13 text {'new_name': 'x', 'until_column': 17} +z = y + (1 + 2)+ 3, 3 +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +#? 13 text {'new_name': 'x', 'until_column': 17} +x = (1 + 2)+ 3 +z = y + x, 3 +# -------------------------------------------------- mult-add-1 +#? 8 text {'new_name': 'x', 'until_column': 11} +z = foo(y+1*2+3, 3) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +#? 8 text {'new_name': 'x', 'until_column': 11} +x = y+1 +z = foo(x*2+3, 3) +# -------------------------------------------------- mult-add-2 +#? 12 text {'new_name': 'x', 'until_column': 15} +z = foo(y+1*2+3) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +#? 12 text {'new_name': 'x', 'until_column': 15} +x = 2+3 +z = foo(y+1*x) +# -------------------------------------------------- mult-add-3 +#? 9 text {'new_name': 'x', 'until_column': 13} +z = (y+1*2+3) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +#? 9 text {'new_name': 'x', 'until_column': 13} +x = (y+1*2+3) +z = x +# -------------------------------------------------- extract-weird-1 +#? 0 error {'new_name': 'x', 'until_column': 7} +foo = 3 +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +Cannot extract a "expr_stmt" +# -------------------------------------------------- extract-weird-2 +#? 0 error {'new_name': 'x', 'until_column': 5} +def x(): + foo = 3 +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +Cannot extract a "funcdef" +# -------------------------------------------------- extract-weird-3 +def x(): +#? 
4 error {'new_name': 'x', 'until_column': 8} + if 1: + pass +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +Cannot extract a "if_stmt" +# -------------------------------------------------- extract-weird-4 +#? 4 error {'new_name': 'x', 'until_column': 7} +x = foo = 4 +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +Cannot extract a name that defines something +# -------------------------------------------------- keyword-None +#? 4 text {'new_name': 'x', 'until_column': 7} +yy = not foo or bar +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +#? 4 text {'new_name': 'x', 'until_column': 7} +x = not foo +yy = x or bar +# -------------------------------------------------- augassign +yy = () +#? 6 text {'new_name': 'x', 'until_column': 10} +yy += 3, 4 +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +yy = () +#? 6 text {'new_name': 'x', 'until_column': 10} +x = 3, 4 +yy += x +# -------------------------------------------------- if-else +#? 9 text {'new_name': 'x', 'until_column': 22} +yy = foo(a if y else b) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +#? 9 text {'new_name': 'x', 'until_column': 22} +x = a if y else b +yy = foo(x) +# -------------------------------------------------- lambda +#? 8 text {'new_name': 'x', 'until_column': 17} +y = foo(lambda x: 3, 5) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +#? 
8 text {'new_name': 'x', 'until_column': 17} +x = lambda x: 3 +y = foo(x, 5) diff --git a/bundle/jedi-vim/pythonx/jedi/test/refactor/import_tree/inline_mod.py b/bundle/jedi-vim/pythonx/jedi/test/refactor/import_tree/inline_mod.py new file mode 100644 index 000000000..0a1303e38 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/refactor/import_tree/inline_mod.py @@ -0,0 +1 @@ +inline_var = 5 + 3 diff --git a/bundle/jedi-vim/pythonx/jedi/test/refactor/import_tree/pkgx/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/refactor/import_tree/pkgx/__init__.py new file mode 100644 index 000000000..85a0765a2 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/refactor/import_tree/pkgx/__init__.py @@ -0,0 +1,2 @@ +def pkgx(): + pass diff --git a/bundle/jedi-vim/pythonx/jedi/test/refactor/import_tree/pkgx/__init__.pyi b/bundle/jedi-vim/pythonx/jedi/test/refactor/import_tree/pkgx/__init__.pyi new file mode 100644 index 000000000..b47d393f1 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/refactor/import_tree/pkgx/__init__.pyi @@ -0,0 +1 @@ +def pkgx() -> int: ... diff --git a/bundle/jedi-vim/pythonx/jedi/test/refactor/import_tree/pkgx/mod.pyi b/bundle/jedi-vim/pythonx/jedi/test/refactor/import_tree/pkgx/mod.pyi new file mode 100644 index 000000000..76bc2c84e --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/refactor/import_tree/pkgx/mod.pyi @@ -0,0 +1 @@ +from . import pkgx diff --git a/bundle/jedi-vim/pythonx/jedi/test/refactor/import_tree/pkgx/mod2.py b/bundle/jedi-vim/pythonx/jedi/test/refactor/import_tree/pkgx/mod2.py new file mode 100644 index 000000000..77968fc95 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/refactor/import_tree/pkgx/mod2.py @@ -0,0 +1 @@ +from .. 
import pkgx diff --git a/bundle/jedi-vim/pythonx/jedi/test/refactor/import_tree/some_mod.py b/bundle/jedi-vim/pythonx/jedi/test/refactor/import_tree/some_mod.py new file mode 100644 index 000000000..ccb0f5e60 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/refactor/import_tree/some_mod.py @@ -0,0 +1 @@ +foobar = 3 diff --git a/bundle/jedi-vim/pythonx/jedi/test/refactor/inline.py b/bundle/jedi-vim/pythonx/jedi/test/refactor/inline.py new file mode 100644 index 000000000..213ef0be1 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/refactor/inline.py @@ -0,0 +1,248 @@ +# -------------------------------------------------- no-name-error +#? 0 error +1 +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +There is no name under the cursor +# -------------------------------------------------- no-reference-error +#? 0 error +a = 1 +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +There are no references to this name +# -------------------------------------------------- multi-equal-error +def test(): + #? 4 error + a = b = 3 + return test(100, a) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +Cannot inline a statement with multiple definitions +# -------------------------------------------------- no-definition-error +#? 5 error +test(a) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +No definition found to inline +# -------------------------------------------------- multi-names-error +#? 0 error +a, b[1] = 3 +test(a) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +Cannot inline a statement with multiple definitions +# -------------------------------------------------- addition-error +#? 0 error +a = 2 +a += 3 +test(a) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +Cannot inline a name with multiple definitions +# -------------------------------------------------- only-addition-error +#? 
0 error +a += 3 +test(a) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +Cannot inline a statement with "+=" +# -------------------------------------------------- with-annotation +foobarb: int = 1 +#? 5 +test(foobarb) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +--- inline.py ++++ inline.py +@@ -1,4 +1,3 @@ +-foobarb: int = 1 + #? 5 +-test(foobarb) ++test(1) +# -------------------------------------------------- only-annotation-error +a: int +#? 5 error +test(a) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +Cannot inline a statement that is defined by an annotation +# -------------------------------------------------- builtin +import math +#? 7 error +math.cos +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +Cannot inline builtins/extensions +# -------------------------------------------------- module-error +from import_tree import inline_mod +#? 11 error +test(inline_mod) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +Cannot inline imports, modules or namespaces +# -------------------------------------------------- module-works +from import_tree import inline_mod +#? 22 +test(x, inline_mod. inline_var.conjugate) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +--- import_tree/inline_mod.py ++++ import_tree/inline_mod.py +@@ -1,2 +1 @@ +-inline_var = 5 + 3 +--- inline.py ++++ inline.py +@@ -1,4 +1,4 @@ + from import_tree import inline_mod + #? 22 +-test(x, inline_mod. inline_var.conjugate) ++test(x, (5 + 3).conjugate) +# -------------------------------------------------- class +class A: pass +#? 5 error +test(A) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +Cannot inline a class +# -------------------------------------------------- function +def foo(a): + return a + 1 +#? 5 error +test(foo(1)) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +Cannot inline a function +# -------------------------------------------------- for-stmt +for x in []: + #? 
9 error + test(x) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +Cannot inline a for_stmt +# -------------------------------------------------- simple +def test(): + #? 4 + a = (30 + b, c) + 1 + return test(100, a) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +--- inline.py ++++ inline.py +@@ -1,5 +1,4 @@ + def test(): + #? 4 +- a = (30 + b, c) + 1 +- return test(100, a) ++ return test(100, (30 + b, c) + 1) +# -------------------------------------------------- tuple +if 1: + #? 4 + a = 1, 2 + return test(100, a) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +--- inline.py ++++ inline.py +@@ -1,5 +1,4 @@ + if 1: + #? 4 +- a = 1, 2 +- return test(100, a) ++ return test(100, (1, 2)) +# -------------------------------------------------- multiplication-add-parens1 +a = 1+2 +#? 11 +test(100 * a) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +--- inline.py ++++ inline.py +@@ -1,4 +1,3 @@ +-a = 1+2 + #? 11 +-test(100 * a) ++test(100 * (1+2)) +# -------------------------------------------------- multiplication-add-parens2 +a = 1+2 +#? 11 +(x, 100 * a) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +--- inline.py ++++ inline.py +@@ -1,4 +1,3 @@ +-a = 1+2 + #? 11 +-(x, 100 * a) ++(x, 100 * (1+2)) +# -------------------------------------------------- multiplication-add-parens3 +x +a = 1+2 +#? 9 +(100 ** a) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +--- inline.py ++++ inline.py +@@ -1,5 +1,4 @@ + x +-a = 1+2 + #? 9 +-(100 ** a) ++(100 ** (1+2)) +# -------------------------------------------------- no-add-parens1 +x +a = 1+2 +#? 5 +test(a) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +--- inline.py ++++ inline.py +@@ -1,5 +1,4 @@ + x +-a = 1+2 + #? 5 +-test(a) ++test(1+2) +# -------------------------------------------------- no-add-parens2 +a = 1+2 +#? 9 +test(3, a) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +--- inline.py ++++ inline.py +@@ -1,4 +1,3 @@ +-a = 1+2 + #? 
9 +-test(3, a) ++test(3, 1+2) +# -------------------------------------------------- no-add-parens3 +a = 1|2 +#? 5 +(3, a) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +--- inline.py ++++ inline.py +@@ -1,4 +1,3 @@ +-a = 1|2 + #? 5 +-(3, a) ++(3, 1|2) +# -------------------------------------------------- comment +a = 1 and 2 # foo +#? 9 +(3, 3 * a) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +--- inline.py ++++ inline.py +@@ -1,4 +1,4 @@ +-a = 1 and 2 # foo ++ # foo + #? 9 +-(3, 3 * a) ++(3, 3 * (1 and 2)) +# -------------------------------------------------- semicolon +a = 1, 2 ; b = 3 +#? 9 +(3, 3 == a) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +--- inline.py ++++ inline.py +@@ -1,4 +1,4 @@ +-a = 1, 2 ; b = 3 ++ b = 3 + #? 9 +-(3, 3 == a) ++(3, 3 == (1, 2)) +# -------------------------------------------------- no-tree-name +a = 1 + 2 +#? 0 +a.conjugate +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +--- inline.py ++++ inline.py +@@ -1,4 +1,3 @@ +-a = 1 + 2 + #? 0 +-a.conjugate ++(1 + 2).conjugate diff --git a/bundle/jedi-vim/pythonx/jedi/test/refactor/rename.py b/bundle/jedi-vim/pythonx/jedi/test/refactor/rename.py new file mode 100644 index 000000000..864032b82 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/refactor/rename.py @@ -0,0 +1,235 @@ +""" +Test coverage for renaming is mostly being done by testing +`Script.get_references`. +""" + +# -------------------------------------------------- no-name +#? 0 error {'new_name': 'blabla'} +1 +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +There is no name under the cursor +# -------------------------------------------------- simple +def test1(): + #? 7 {'new_name': 'blabla'} + test1() + AssertionError + return test1, test1.not_existing +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +--- rename.py ++++ rename.py +@@ -1,6 +1,6 @@ +-def test1(): ++def blabla(): + #? 
7 {'new_name': 'blabla'} +- test1() ++ blabla() + AssertionError +- return test1, test1.not_existing ++ return blabla, blabla.not_existing +# -------------------------------------------------- var-not-found +undefined_var +#? 0 {'new_name': 'lala'} +undefined_var +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +--- rename.py ++++ rename.py +@@ -1,4 +1,4 @@ + undefined_var + #? 0 {'new_name': 'lala'} +-undefined_var ++lala +# -------------------------------------------------- different-scopes +def x(): + #? 7 {'new_name': 'v'} + some_var = 3 + some_var +def y(): + some_var = 3 + some_var +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +--- rename.py ++++ rename.py +@@ -1,7 +1,7 @@ + def x(): + #? 7 {'new_name': 'v'} +- some_var = 3 +- some_var ++ v = 3 ++ v + def y(): + some_var = 3 + some_var +# -------------------------------------------------- keyword-param1 +#? 22 {'new_name': 'lala'} +def mykeywordparam1(param1): + str(param1) +mykeywordparam1(1) +mykeywordparam1(param1=3) +mykeywordparam1(x, param1=2) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +--- rename.py ++++ rename.py +@@ -1,7 +1,7 @@ + #? 22 {'new_name': 'lala'} +-def mykeywordparam1(param1): +- str(param1) ++def mykeywordparam1(lala): ++ str(lala) + mykeywordparam1(1) +-mykeywordparam1(param1=3) +-mykeywordparam1(x, param1=2) ++mykeywordparam1(lala=3) ++mykeywordparam1(x, lala=2) +# -------------------------------------------------- keyword-param2 +def mykeywordparam2(param1): + str(param1) +mykeywordparam2(1) +mykeywordparam2(param1=3) +#? 22 {'new_name': 'lala'} +mykeywordparam2(x, param1=2) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +--- rename.py ++++ rename.py +@@ -1,7 +1,7 @@ +-def mykeywordparam2(param1): +- str(param1) ++def mykeywordparam2(lala): ++ str(lala) + mykeywordparam2(1) +-mykeywordparam2(param1=3) ++mykeywordparam2(lala=3) + #? 
22 {'new_name': 'lala'} +-mykeywordparam2(x, param1=2) ++mykeywordparam2(x, lala=2) +# -------------------------------------------------- import +from import_tree.some_mod import foobar +#? 0 {'new_name': 'renamed'} +foobar +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +--- import_tree/some_mod.py ++++ import_tree/some_mod.py +@@ -1,2 +1,2 @@ +-foobar = 3 ++renamed = 3 +--- rename.py ++++ rename.py +@@ -1,4 +1,4 @@ +-from import_tree.some_mod import foobar ++from import_tree.some_mod import renamed + #? 0 {'new_name': 'renamed'} +-foobar ++renamed +# -------------------------------------------------- module +from import_tree import some_mod +#? 0 {'new_name': 'renamedm'} +some_mod +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +rename from import_tree/some_mod.py +rename to import_tree/renamedm.py +--- rename.py ++++ rename.py +@@ -1,4 +1,4 @@ +-from import_tree import some_mod ++from import_tree import renamedm + #? 0 {'new_name': 'renamedm'} +-some_mod ++renamedm +# -------------------------------------------------- import-not-found +#? 20 {'new_name': 'lala'} +import undefined_import +haha( undefined_import) +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +--- rename.py ++++ rename.py +@@ -1,4 +1,4 @@ + #? 20 {'new_name': 'lala'} +-import undefined_import +-haha( undefined_import) ++import lala ++haha( lala) +# -------------------------------------------------- in-package-with-stub +#? 31 {'new_name': 'renamedm'} +from import_tree.pkgx import pkgx +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +--- import_tree/pkgx/__init__.py ++++ import_tree/pkgx/__init__.py +@@ -1,3 +1,3 @@ +-def pkgx(): ++def renamedm(): + pass +--- import_tree/pkgx/__init__.pyi ++++ import_tree/pkgx/__init__.pyi +@@ -1,2 +1,2 @@ +-def pkgx() -> int: ... ++def renamedm() -> int: ... +--- import_tree/pkgx/mod.pyi ++++ import_tree/pkgx/mod.pyi +@@ -1,2 +1,2 @@ +-from . import pkgx ++from . import renamedm +--- rename.py ++++ rename.py +@@ -1,3 +1,3 @@ + #? 
31 {'new_name': 'renamedm'} +-from import_tree.pkgx import pkgx ++from import_tree.pkgx import renamedm +# -------------------------------------------------- package-with-stub +#? 18 {'new_name': 'renamedp'} +from import_tree.pkgx +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +rename from import_tree/pkgx +rename to import_tree/renamedp +--- import_tree/pkgx/mod2.py ++++ import_tree/renamedp/mod2.py +@@ -1,2 +1,2 @@ +-from .. import pkgx ++from .. import renamedp +--- rename.py ++++ rename.py +@@ -1,3 +1,3 @@ + #? 18 {'new_name': 'renamedp'} +-from import_tree.pkgx ++from import_tree.renamedp +# -------------------------------------------------- weird-package-mix +if random_undefined_variable: + from import_tree.pkgx import pkgx +else: + from import_tree import pkgx +#? 4 {'new_name': 'rename'} +pkgx +# ++++++++++++++++++++++++++++++++++++++++++++++++++ +rename from import_tree/pkgx +rename to import_tree/rename +--- import_tree/pkgx/__init__.py ++++ import_tree/rename/__init__.py +@@ -1,3 +1,3 @@ +-def pkgx(): ++def rename(): + pass +--- import_tree/pkgx/__init__.pyi ++++ import_tree/rename/__init__.pyi +@@ -1,2 +1,2 @@ +-def pkgx() -> int: ... ++def rename() -> int: ... +--- import_tree/pkgx/mod.pyi ++++ import_tree/rename/mod.pyi +@@ -1,2 +1,2 @@ +-from . import pkgx ++from . import rename +--- import_tree/pkgx/mod2.py ++++ import_tree/rename/mod2.py +@@ -1,2 +1,2 @@ +-from .. import pkgx ++from .. import rename +--- rename.py ++++ rename.py +@@ -1,7 +1,7 @@ + if random_undefined_variable: +- from import_tree.pkgx import pkgx ++ from import_tree.rename import rename + else: +- from import_tree import pkgx ++ from import_tree import rename + #? 
4 {'new_name': 'rename'} +-pkgx ++rename diff --git a/bundle/jedi-vim/pythonx/jedi/test/run.py b/bundle/jedi-vim/pythonx/jedi/test/run.py new file mode 100644 index 000000000..1f1c9dc93 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/run.py @@ -0,0 +1,544 @@ +#!/usr/bin/env python3 +""" +|jedi| is mostly being tested by what I would call "integration tests". These +tests are testing type inference with the public API. This makes a +lot of sense for |jedi|. Also, it is hard to write doctests/unittests for +the internal data structures. + +There are different kinds of tests: + +- completions / inference ``#?`` +- goto: ``#!`` +- references: ``#<`` + +How to run tests? ++++++++++++++++++ + +Jedi uses pytest_ to run unit and integration tests. To run tests, +simply run ``pytest``. + +.. _pytest: http://pytest.org + +Most integration test cases are located in the ``test/completion`` directory +and each test case starts with one of these comments: + +- ``#?`` (completions / inference) +- ``#!`` (goto) +- ``#<`` (references) + +There is also support for third party libraries. In a normal test run they are +not being executed, you have to provide a ``--thirdparty`` option. + +In addition to pytest's ``-k`` and ``-m`` options, you can use the +``-T`` (``--test-files`) option to specify which test cases should run. +It takes the format of ``FILE_NAME[:LINE[,LINE[,...]]]`` where +``FILE_NAME`` is a file in ``test/completion`` and ``LINE`` is a line +number of the test comment. Here are some examples: + +Run tests only in ``completion/basic.py`` and ``completion/imports.py``:: + + pytest test/test_integration.py -T basic.py -T imports.py + +Run test at line 4, 6, and 8 in ``completion/basic.py``:: + + pytest test/test_integration.py -T basic.py:4,6,8 + +See ``pytest --help`` for more information. + +If you want to debug a test, just use the ``--pdb`` option. 
+ +Alternate Test Runner ++++++++++++++++++++++ + +If you don't like the output of ``pytest``, there's an alternate test runner +that you can start by running ``./run.py``. The above example could be run by:: + + ./run.py basic 4 6 8 50-80 + +The advantage of this runner is simplicity and more customized error reports. + +Auto-Completion Tests ++++++++++++++++++++++ + +Uses a comment to specify a test on the next line. The comment defines the +expected completions. The comment always begins with `#?`. The last row +symbolizes the cursor. For example:: + + #? ['upper'] + a = 'foo'; a.upp + +Inference Tests ++++++++++++++++ + +Inference tests look very simliar. The difference is that inference tests don't +use brackets:: + + #? int() + ab = 3; ab + +Goto Tests +++++++++++ + +Goto Tests look like this:: + + abc = 1 + #! ['abc=1'] + abc + +Additionally it is possible to specify the column by adding a number, which +describes the position of the test (otherwise it's just the end of line):: + + #! 
2 ['abc=1'] + abc + +Reference Tests ++++++++++++++++ + +Tests look like this:: + + abc = 1 + #< (1,0), (3,0) + abc +""" +import os +import re +import sys +import operator +if sys.version_info < (3, 8): + literal_eval = eval +else: + from ast import literal_eval +from io import StringIO +from functools import reduce +from unittest.mock import ANY +from pathlib import Path + +import parso +from _pytest.outcomes import Skipped +import pytest + +import jedi +from jedi import debug +from jedi.api.classes import Name +from jedi.api.completion import get_user_context +from jedi import parser_utils +from jedi.api.environment import get_default_environment, get_system_environment +from jedi.inference.gradual.conversion import convert_values +from jedi.inference.analysis import Warning + +test_dir = Path(__file__).absolute().parent + +TEST_COMPLETIONS = 0 +TEST_INFERENCE = 1 +TEST_GOTO = 2 +TEST_REFERENCES = 3 + + +grammar36 = parso.load_grammar(version='3.6') + + +class BaseTestCase(object): + def __init__(self, skip_version_info=None): + self._skip_version_info = skip_version_info + self._skip = None + + def set_skip(self, reason): + self._skip = reason + + def get_skip_reason(self, environment): + if self._skip is not None: + return self._skip + + if self._skip_version_info is None: + return + + comp_map = { + '==': 'eq', + '<=': 'le', + '>=': 'ge', + '<': 'lt', + '>': 'gt', + } + min_version, operator_ = self._skip_version_info + operation = getattr(operator, comp_map[operator_]) + if not operation(environment.version_info[:2], min_version): + return "Python version %s %s.%s" % ( + operator_, min_version[0], min_version[1] + ) + + +class IntegrationTestCase(BaseTestCase): + def __init__(self, test_type, correct, line_nr, column, start, line, + path=None, skip_version_info=None): + super().__init__(skip_version_info) + self.test_type = test_type + self.correct = correct + self.line_nr = line_nr + self.column = column + self.start = start + self.line = line + self.path = 
path + self._project = jedi.Project(test_dir) + + @property + def module_name(self): + return os.path.splitext(os.path.basename(self.path))[0] + + @property + def line_nr_test(self): + """The test is always defined on the line before.""" + return self.line_nr - 1 + + def __repr__(self): + return '<%s: %s:%s %r>' % (self.__class__.__name__, self.path, + self.line_nr_test, self.line.rstrip()) + + def script(self, environment): + return jedi.Script( + self.source, + path=self.path, + environment=environment, + project=self._project + ) + + def run(self, compare_cb, environment=None): + testers = { + TEST_COMPLETIONS: self.run_completion, + TEST_INFERENCE: self.run_inference, + TEST_GOTO: self.run_goto, + TEST_REFERENCES: self.run_get_references, + } + if (self.path.endswith('pytest.py') or self.path.endswith('conftest.py')) \ + and environment.executable != os.path.realpath(sys.executable): + # It's not guarantueed that pytest is installed in test + # environments, if we're not running in the same environment that + # we're already in, so just skip that case. 
+ pytest.skip() + return testers[self.test_type](compare_cb, environment) + + def run_completion(self, compare_cb, environment): + completions = self.script(environment).complete(self.line_nr, self.column) + # import cProfile; cProfile.run('...') + + comp_str = {c.name for c in completions} + for r in completions: + # Test if this access raises an error + assert isinstance(r.type, str) + return compare_cb(self, comp_str, set(literal_eval(self.correct))) + + def run_inference(self, compare_cb, environment): + script = self.script(environment) + inference_state = script._inference_state + + def comparison(definition): + suffix = '()' if definition.type == 'instance' else '' + return definition.full_name + suffix + + def definition(correct, correct_start, path): + should_be = set() + for match in re.finditer('(?:[^ ]+)', correct): + string = match.group(0) + parser = grammar36.parse(string, start_symbol='eval_input', error_recovery=False) + parser_utils.move(parser.get_root_node(), self.line_nr) + node = parser.get_root_node() + module_context = script._get_module_context() + user_context = get_user_context(module_context, (self.line_nr, 0)) + node.parent = user_context.tree_node + results = convert_values(user_context.infer_node(node)) + if not results: + raise Exception('Could not resolve %s on line %s' + % (match.string, self.line_nr - 1)) + + should_be |= set(Name(inference_state, r.name) for r in results) + debug.dbg('Finished getting types', color='YELLOW') + + # Because the objects have different ids, `repr`, then compare. 
+ should = set(comparison(r) for r in should_be) + return should + + should = definition(self.correct, self.start, script.path) + result = script.infer(self.line_nr, self.column) + is_str = set(comparison(r) for r in result) + for r in result: + # Test if this access raises an error + assert isinstance(r.type, str) + return compare_cb(self, is_str, should) + + def run_goto(self, compare_cb, environment): + result = self.script(environment).goto(self.line_nr, self.column) + comp_str = str(sorted(str(r.description) for r in result)) + return compare_cb(self, comp_str, self.correct) + + def run_get_references(self, compare_cb, environment): + result = self.script(environment).get_references(self.line_nr, self.column) + self.correct = self.correct.strip() + compare = sorted( + (('stub:' if r.is_stub() else '') + + re.sub(r'^completion\.', '', r.module_name), + r.line, + r.column) + for r in result + ) + wanted = [] + if not self.correct: + positions = [] + else: + positions = literal_eval(self.correct) + for pos_tup in positions: + if type(pos_tup[0]) == str: + # this means that there is a module specified + if pos_tup[1] == ...: + pos_tup = pos_tup[0], ANY, pos_tup[2] + wanted.append(pos_tup) + else: + line = pos_tup[0] + if pos_tup[0] is not None: + line += self.line_nr + wanted.append((self.module_name, line, pos_tup[1])) + + return compare_cb(self, compare, sorted(wanted)) + + +class StaticAnalysisCase(BaseTestCase): + """ + Static Analysis cases lie in the static_analysis folder. + The tests also start with `#!`, like the inference tests. 
+ """ + def __init__(self, path): + self._path = path + self.name = os.path.basename(path) + with open(path) as f: + self._source = f.read() + + skip_version_info = None + for line in self._source.splitlines(): + skip_version_info = skip_python_version(line) or skip_version_info + + super().__init__(skip_version_info) + + def collect_comparison(self): + cases = [] + for line_nr, line in enumerate(self._source.splitlines(), 1): + match = re.match(r'(\s*)#! (\d+ )?(.*)$', line) + if match is not None: + column = int(match.group(2) or 0) + len(match.group(1)) + cases.append((line_nr + 1, column, match.group(3))) + return cases + + def run(self, compare_cb, environment): + def typ_str(inst): + return 'warning ' if isinstance(inst, Warning) else '' + + analysis = jedi.Script( + self._source, + path=self._path, + environment=environment, + )._analysis() + analysis = [(r.line, r.column, typ_str(r) + r.name) + for r in analysis] + compare_cb(self, analysis, self.collect_comparison()) + + def __repr__(self): + return "<%s: %s>" % (self.__class__.__name__, os.path.basename(self._path)) + + +def skip_python_version(line): + # check for python minimal version number + match = re.match(r" *# *python *([<>]=?|==) *(\d+(?:\.\d+)?)$", line) + if match: + minimal_python_version = tuple(map(int, match.group(2).split("."))) + return minimal_python_version, match.group(1) + return None + + +def collect_file_tests(path, lines, lines_to_execute): + def makecase(t): + return IntegrationTestCase(t, correct, line_nr, column, + start, line, path=path, + skip_version_info=skip_version_info) + + start = None + correct = None + test_type = None + skip_version_info = None + for line_nr, line in enumerate(lines, 1): + if correct is not None: + r = re.match(r'^(\d+)\s*(.*)$', correct) + if r: + column = int(r.group(1)) + correct = r.group(2) + start += r.regs[2][0] # second group, start index + else: + column = len(line) - 1 # -1 for the \n + if test_type == '!': + yield makecase(TEST_GOTO) + 
elif test_type == '<': + yield makecase(TEST_REFERENCES) + elif correct.startswith('['): + yield makecase(TEST_COMPLETIONS) + else: + yield makecase(TEST_INFERENCE) + correct = None + else: + skip_version_info = skip_python_version(line) or skip_version_info + try: + r = re.search(r'(?:^|(?<=\s))#([?!<])\s*([^\n]*)', line) + # test_type is ? for completion and ! for goto + test_type = r.group(1) + correct = r.group(2) + # Quick hack to make everything work (not quite a bloody unicorn hack though). + if correct == '': + correct = ' ' + start = r.start() + except AttributeError: + correct = None + else: + # Skip the test, if this is not specified test. + for l in lines_to_execute: + if isinstance(l, tuple) and l[0] <= line_nr <= l[1] \ + or line_nr == l: + break + else: + if lines_to_execute: + correct = None + + +def collect_dir_tests(base_dir, test_files, check_thirdparty=False): + for f_name in os.listdir(base_dir): + files_to_execute = [a for a in test_files.items() if f_name.startswith(a[0])] + lines_to_execute = reduce(lambda x, y: x + y[1], files_to_execute, []) + if f_name.endswith(".py") and (not test_files or files_to_execute): + skip = None + if check_thirdparty: + lib = f_name.replace('_.py', '') + try: + # there is always an underline at the end. + # It looks like: completion/thirdparty/pylab_.py + __import__(lib) + except ImportError: + skip = 'Thirdparty-Library %s not found.' % lib + + path = os.path.join(base_dir, f_name) + + with open(path, newline='') as f: + source = f.read() + + for case in collect_file_tests(path, StringIO(source), + lines_to_execute): + case.source = source + if skip: + case.set_skip(skip) + yield case + + +docoptstr = """ +Using run.py to make debugging easier with integration tests. + +An alternative testing format, which is much more hacky, but very nice to +work with. + +Usage: + run.py [--pdb] [--debug] [--thirdparty] [--env ] [...] + run.py --help + +Options: + -h --help Show this screen. 
+ --pdb Enable pdb debugging on fail. + -d, --debug Enable text output debugging (please install ``colorama``). + --thirdparty Also run thirdparty tests (in ``completion/thirdparty``). + --env A Python version, like 3.9, 3.8, etc. +""" +if __name__ == '__main__': + import docopt + arguments = docopt.docopt(docoptstr) + + import time + t_start = time.time() + + if arguments['--debug']: + jedi.set_debug_function() + + # get test list, that should be executed + test_files = {} + last = None + for arg in arguments['']: + match = re.match(r'(\d+)-(\d+)', arg) + if match: + start, end = match.groups() + test_files[last].append((int(start), int(end))) + elif arg.isdigit(): + if last is None: + continue + test_files[last].append(int(arg)) + else: + test_files[arg] = [] + last = arg + + # completion tests: + dir_ = os.path.dirname(os.path.realpath(__file__)) + completion_test_dir = os.path.join(dir_, '../test/completion') + completion_test_dir = os.path.abspath(completion_test_dir) + tests_fail = 0 + + # execute tests + cases = list(collect_dir_tests(completion_test_dir, test_files)) + if test_files or arguments['--thirdparty']: + completion_test_dir += '/thirdparty' + cases += collect_dir_tests(completion_test_dir, test_files, True) + + def file_change(current, tests, fails): + if current is None: + current = '' + else: + current = os.path.basename(current) + print('{:25} {} tests and {} fails.'.format(current, tests, fails)) + + def report(case, actual, desired): + if actual == desired: + return 0 + else: + print("\ttest fail @%d, actual = %s, desired = %s" + % (case.line_nr - 1, actual, desired)) + return 1 + + if arguments['--env']: + environment = get_system_environment(arguments['--env']) + else: + # Will be 3.6. 
+ environment = get_default_environment() + + import traceback + current = cases[0].path if cases else None + count = fails = 0 + for c in cases: + if c.get_skip_reason(environment): + continue + if current != c.path: + file_change(current, count, fails) + current = c.path + count = fails = 0 + + try: + if c.run(report, environment): + tests_fail += 1 + fails += 1 + except Exception: + traceback.print_exc() + print("\ttest fail @%d" % (c.line_nr - 1)) + tests_fail += 1 + fails += 1 + if arguments['--pdb']: + import pdb + pdb.post_mortem() + except Skipped: + pass + + count += 1 + + file_change(current, count, fails) + + print('\nSummary: (%s fails of %s tests) in %.3fs' + % (tests_fail, len(cases), time.time() - t_start)) + + exit_code = 1 if tests_fail else 0 + sys.exit(exit_code) diff --git a/bundle/jedi-vim/pythonx/jedi/test/static_analysis/attribute_error.py b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/attribute_error.py new file mode 100644 index 000000000..7ceb93972 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/attribute_error.py @@ -0,0 +1,113 @@ +class Cls(): + class_attr = '' + def __init__(self, input): + self.instance_attr = 3 + self.input = input + + def f(self): + #! 12 attribute-error + return self.not_existing + + def undefined_object(self, obj): + """ + Uses an arbitrary object and performs an operation on it, shouldn't + be a problem. + """ + obj.arbitrary_lookup + + def defined_lookup(self, obj): + """ + `obj` is defined by a call into this function. + """ + obj.upper + #! 4 attribute-error + obj.arbitrary_lookup + + #! 13 name-error + class_attr = a + +Cls(1).defined_lookup('') + +c = Cls(1) +c.class_attr +Cls.class_attr +#! 4 attribute-error +Cls.class_attr_error +c.instance_attr +#! 2 attribute-error +c.instance_attr_error + + +c.something = None + +#! 12 name-error +something = a +something + +# ----------------- +# Unused array variables should still raise attribute errors. 
+# ----------------- + +# should not raise anything. +for loop_variable in [1, 2]: + #! 4 name-error + x = undefined + loop_variable + +#! 28 name-error +for loop_variable in [1, 2, undefined]: + pass + +#! 7 attribute-error +[1, ''.undefined_attr] + + +def return_one(something): + return 1 + +#! 14 attribute-error +return_one(''.undefined_attribute) + +#! 12 name-error +[r for r in undefined] + +#! 1 name-error +[undefined for r in [1, 2]] + +[r for r in [1, 2]] + +# some random error that showed up +class NotCalled(): + def match_something(self, param): + seems_to_need_an_assignment = param + return [value.match_something() for value in []] + +# ----------------- +# decorators +# ----------------- + +#! 1 name-error +@undefined_decorator +def func(): + return 1 + +# ----------------- +# operators +# ----------------- + +string = '%s %s' % (1, 2) + +# Shouldn't raise an error, because `string` is really just a string, not an +# array or something. +string.upper + +# ----------------- +# imports +# ----------------- + +# Star imports and the like in modules should not cause attribute errors in +# this module. +import import_tree + +import_tree.a +import_tree.b diff --git a/bundle/jedi-vim/pythonx/jedi/test/static_analysis/attribute_warnings.py b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/attribute_warnings.py new file mode 100644 index 000000000..0e1e5e955 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/attribute_warnings.py @@ -0,0 +1,46 @@ +""" +Jedi issues warnings for possible errors if ``__getattr__``, +``__getattribute__`` or ``setattr`` are used. +""" + +# ----------------- +# __getattr*__ +# ----------------- + + +class Cls(): + def __getattr__(self, name): + return getattr(str, name) + + +Cls().upper + +#! 6 warning attribute-error +Cls().undefined + + +class Inherited(Cls): + pass + +Inherited().upper + +#! 
12 warning attribute-error +Inherited().undefined + +# ----------------- +# setattr +# ----------------- + + +class SetattrCls(): + def __init__(self, dct): + # Jedi doesn't even try to understand such code + for k, v in dct.items(): + setattr(self, k, v) + + self.defined = 3 + +c = SetattrCls({'a': 'b'}) +c.defined +#! 2 warning attribute-error +c.undefined diff --git a/bundle/jedi-vim/pythonx/jedi/test/static_analysis/branches.py b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/branches.py new file mode 100644 index 000000000..14ea9a7bf --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/branches.py @@ -0,0 +1,55 @@ +# ----------------- +# Simple tests +# ----------------- + +import random + +if random.choice([0, 1]): + x = '' +else: + x = 1 +if random.choice([0, 1]): + y = '' +else: + y = 1 + +# A simple test +if x != 1: + x.upper() +else: + #! 2 attribute-error + x.upper() + pass + +# This operation is wrong, because the types could be different. +#! 6 type-error-operation +z = x + y +# However, here we have correct types. +if x == y: + z = x + y +else: + #! 6 type-error-operation + z = x + y + + +# TODO enable this one. +#x = 3 +#if x != 1: +# x.upper() + +# ----------------- +# With a function +# ----------------- + +def addition(a, b): + if type(a) == type(b): + # Might still be a type error, we might want to change this in the + # future. + #! 9 type-error-operation + return a + b + else: + #! 9 type-error-operation + return a + b + +addition(1, 1) +addition(1.0, '') diff --git a/bundle/jedi-vim/pythonx/jedi/test/static_analysis/builtins.py b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/builtins.py new file mode 100644 index 000000000..86caca65d --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/builtins.py @@ -0,0 +1,11 @@ +# ---------- +# isinstance +# ---------- + +isinstance(1, int) +isinstance(1, (int, str)) + +#! 14 type-error-isinstance +isinstance(1, 1) +#! 
14 type-error-isinstance +isinstance(1, [int, str]) diff --git a/bundle/jedi-vim/pythonx/jedi/test/static_analysis/class_simple.py b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/class_simple.py new file mode 100644 index 000000000..3f84fde04 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/class_simple.py @@ -0,0 +1,13 @@ +class Base(object): + class Nested(): + def foo(): + pass + + +class X(Base.Nested): + pass + + +X().foo() +#! 4 attribute-error +X().bar() diff --git a/bundle/jedi-vim/pythonx/jedi/test/static_analysis/comprehensions.py b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/comprehensions.py new file mode 100644 index 000000000..8701b1124 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/comprehensions.py @@ -0,0 +1,42 @@ +[a + 1 for a in [1, 2]] + +#! 3 type-error-operation +[a + '' for a in [1, 2]] +#! 3 type-error-operation +(a + '' for a in [1, 2]) + +#! 12 type-error-not-iterable +[a for a in 1] + +tuple(str(a) for a in [1]) + +#! 8 type-error-operation +tuple(a + 3 for a in ['']) + +# ---------- +# Some variables within are not defined +# ---------- + +abcdef = [] +#! 12 name-error +[1 for a in NOT_DEFINFED for b in abcdef if 1] + +#! 25 name-error +[1 for a in [1] for b in NOT_DEFINED if 1] + +#! 12 name-error +[1 for a in NOT_DEFINFED for b in [1] if 1] + +#! 19 name-error +(1 for a in [1] if NOT_DEFINED) + +# ---------- +# unbalanced sides. +# ---------- + +# ok +(1 for a, b in [(1, 2)]) +#! 13 value-error-too-few-values +(1 for a, b, c in [(1, 2)]) +#! 
10 value-error-too-many-values +(1 for a, b in [(1, 2, 3)]) diff --git a/bundle/jedi-vim/pythonx/jedi/test/static_analysis/descriptors.py b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/descriptors.py new file mode 100644 index 000000000..0fc5d1595 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/descriptors.py @@ -0,0 +1,13 @@ +# classmethod +class TarFile(): + @classmethod + def open(cls, name, **kwargs): + return cls.taropen(name, **kwargs) + + @classmethod + def taropen(cls, name, **kwargs): + return name + + +# should just work +TarFile.open('hallo') diff --git a/bundle/jedi-vim/pythonx/jedi/test/static_analysis/generators.py b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/generators.py new file mode 100644 index 000000000..b9418002e --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/generators.py @@ -0,0 +1,7 @@ +def generator(): + yield 1 + +#! 11 type-error-not-subscriptable +generator()[0] + +list(generator())[0] diff --git a/bundle/jedi-vim/pythonx/jedi/test/static_analysis/import_tree/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/import_tree/__init__.py new file mode 100644 index 000000000..cb485f13e --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/import_tree/__init__.py @@ -0,0 +1,5 @@ +""" +Another import tree, this time not for completion, but static analysis. +""" + +from .a import * diff --git a/bundle/jedi-vim/pythonx/jedi/test/static_analysis/import_tree/a.py b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/import_tree/a.py new file mode 100644 index 000000000..b02981cc0 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/import_tree/a.py @@ -0,0 +1 @@ +from . 
import b diff --git a/bundle/jedi-vim/pythonx/jedi/test/static_analysis/import_tree/b.py b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/import_tree/b.py new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/jedi-vim/pythonx/jedi/test/static_analysis/imports.py b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/imports.py new file mode 100644 index 000000000..02a08c7d6 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/imports.py @@ -0,0 +1,25 @@ + +#! 7 import-error +import not_existing + +import os + +from os.path import abspath +#! 20 import-error +from os.path import not_existing + +from datetime import date +date.today + +#! 5 attribute-error +date.not_existing_attribute + +#! 14 import-error +from datetime.date import today + +#! 16 import-error +import datetime.datetime +#! 7 import-error +import not_existing_nested.date + +import os.path diff --git a/bundle/jedi-vim/pythonx/jedi/test/static_analysis/iterable.py b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/iterable.py new file mode 100644 index 000000000..0eae367dc --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/iterable.py @@ -0,0 +1,21 @@ + +a, b = {'asdf': 3, 'b': 'str'} +a + +x = [1] +x[0], b = {'a': 1, 'b': '2'} + +dct = {3: ''} +for x in dct: + pass + +#! 4 type-error-not-iterable +for x, y in dct: + pass + +# Shouldn't cause issues, because if there are no types (or we don't know what +# the types are, we should just ignore it. +#! 0 value-error-too-few-values +a, b = [] +#! 7 name-error +a, b = NOT_DEFINED diff --git a/bundle/jedi-vim/pythonx/jedi/test/static_analysis/keywords.py b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/keywords.py new file mode 100644 index 000000000..e3fcaa432 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/keywords.py @@ -0,0 +1,7 @@ +def raises(): + raise KeyError() + + +def wrong_name(): + #! 
6 name-error + raise NotExistingException() diff --git a/bundle/jedi-vim/pythonx/jedi/test/static_analysis/normal_arguments.py b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/normal_arguments.py new file mode 100644 index 000000000..2fc8e81d4 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/normal_arguments.py @@ -0,0 +1,73 @@ +# ----------------- +# normal arguments (no keywords) +# ----------------- + + +def simple(a): + return a + +simple(1) +#! 6 type-error-too-few-arguments +simple() +#! 10 type-error-too-many-arguments +simple(1, 2) + + +#! 10 type-error-too-many-arguments +simple(1, 2, 3) + +# ----------------- +# keyword arguments +# ----------------- + +simple(a=1) +#! 7 type-error-keyword-argument +simple(b=1) +#! 10 type-error-too-many-arguments +simple(1, a=1) + + +def two_params(x, y): + return y + + +two_params(y=2, x=1) +two_params(1, y=2) + +#! 11 type-error-multiple-values +two_params(1, x=2) +#! 17 type-error-too-many-arguments +two_params(1, 2, y=3) + +# ----------------- +# default arguments +# ----------------- + +def default(x, y=1, z=2): + return x + +#! 7 type-error-too-few-arguments +default() +default(1) +default(1, 2) +default(1, 2, 3) +#! 17 type-error-too-many-arguments +default(1, 2, 3, 4) + +default(x=1) + +# ----------------- +# class arguments +# ----------------- + +class Instance(): + def __init__(self, foo): + self.foo = foo + +Instance(1).foo +Instance(foo=1).foo + +#! 12 type-error-too-many-arguments +Instance(1, 2).foo +#! 8 type-error-too-few-arguments +Instance().foo diff --git a/bundle/jedi-vim/pythonx/jedi/test/static_analysis/operations.py b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/operations.py new file mode 100644 index 000000000..bca27c6a9 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/operations.py @@ -0,0 +1,17 @@ +-1 + 1 +1 + 1.0 +#! 2 type-error-operation +1 + '1' +#! 
2 type-error-operation +1 - '1' + +-1 - - 1 +# TODO uncomment +#-1 - int() +#int() - float() +float() - 3.0 + +a = 3 +b = '' +#! 2 type-error-operation +a + b diff --git a/bundle/jedi-vim/pythonx/jedi/test/static_analysis/star_arguments.py b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/star_arguments.py new file mode 100644 index 000000000..34be43b55 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/star_arguments.py @@ -0,0 +1,119 @@ +# ----------------- +# *args +# ----------------- + + +def simple(a): + return a + + +def nested(*args): + return simple(*args) + +nested(1) +#! 6 type-error-too-few-arguments +nested() + + +def nested_no_call_to_function(*args): + return simple(1, *args) + + +def simple2(a, b, c): + return b +def nested(*args): + return simple2(1, *args) +def nested_twice(*args1): + return nested(*args1) + +nested_twice(2, 3) +#! 13 type-error-too-few-arguments +nested_twice(2) +#! 19 type-error-too-many-arguments +nested_twice(2, 3, 4) + + +# A named argument can be located before *args. +def star_args_with_named(*args): + return simple2(c='', *args) + +star_args_with_named(1, 2) +# ----------------- +# **kwargs +# ----------------- + + +def kwargs_test(**kwargs): + return simple2(1, **kwargs) + +kwargs_test(c=3, b=2) +#! 12 type-error-too-few-arguments +kwargs_test(c=3) +#! 12 type-error-too-few-arguments +kwargs_test(b=2) +#! 22 type-error-keyword-argument +kwargs_test(b=2, c=3, d=4) +#! 12 type-error-multiple-values +kwargs_test(b=2, c=3, a=4) + + +def kwargs_nested(**kwargs): + return kwargs_test(b=2, **kwargs) + +kwargs_nested(c=3) +#! 13 type-error-too-few-arguments +kwargs_nested() +#! 19 type-error-keyword-argument +kwargs_nested(c=2, d=4) +#! 14 type-error-multiple-values +kwargs_nested(c=2, a=4) +# TODO reenable +##! 
14 type-error-multiple-values +#kwargs_nested(b=3, c=2) + +# ----------------- +# mixed *args/**kwargs +# ----------------- + +def simple_mixed(a, b, c): + return b + +def mixed(*args, **kwargs): + return simple_mixed(1, *args, **kwargs) + +mixed(1, 2) +mixed(1, c=2) +mixed(b=2, c=3) +mixed(c=4, b='') + +# need separate functions, otherwise these might swallow the errors +def mixed2(*args, **kwargs): + return simple_mixed(1, *args, **kwargs) + + +#! 7 type-error-too-few-arguments +mixed2(c=2) +#! 7 type-error-too-few-arguments +mixed2(3) +#! 13 type-error-too-many-arguments +mixed2(3, 4, 5) +# TODO reenable +##! 13 type-error-too-many-arguments +#mixed2(3, 4, c=5) +#! 7 type-error-multiple-values +mixed2(3, b=5) + +# ----------------- +# plain wrong arguments +# ----------------- + +#! 12 type-error-star-star +simple(1, **[]) +#! 12 type-error-star-star +simple(1, **1) +class A(): pass +#! 12 type-error-star-star +simple(1, **A()) + +#! 11 type-error-star +simple(1, *1) diff --git a/bundle/jedi-vim/pythonx/jedi/test/static_analysis/try_except.py b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/try_except.py new file mode 100644 index 000000000..5e2797780 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/static_analysis/try_except.py @@ -0,0 +1,107 @@ +try: + #! 4 attribute-error + str.not_existing +except TypeError: + pass + +try: + str.not_existing +except AttributeError: + #! 4 attribute-error + str.not_existing + pass + +try: + import not_existing_import +except ImportError: + pass +try: + #! 7 import-error + import not_existing_import2 +except AttributeError: + pass + +# ----------------- +# multi except +# ----------------- +try: + str.not_existing +except (TypeError, AttributeError): pass + +try: + str.not_existing +except ImportError: + pass +except (NotImplementedError, AttributeError): pass + +try: + #! 
4 attribute-error + str.not_existing +except (TypeError, NotImplementedError): pass + +# ----------------- +# detailed except +# ----------------- +try: + str.not_existing +except ((AttributeError)): pass +try: + #! 4 attribute-error + str.not_existing +except [AttributeError]: pass + +# Should be able to detect errors in except statement as well. +try: + pass +#! 7 name-error +except Undefined: + pass + +# ----------------- +# inheritance +# ----------------- + +try: + undefined +except Exception: + pass + +# should catch everything +try: + undefined +except: + pass + +# ----------------- +# kind of similar: hasattr +# ----------------- + +if hasattr(str, 'undefined'): + str.undefined + str.upper + #! 4 attribute-error + str.undefined2 + #! 4 attribute-error + int.undefined +else: + str.upper + #! 4 attribute-error + str.undefined + +# ----------------- +# arguments +# ----------------- + +def i_see(r): + return r + +def lala(): + # This weird structure checks if the error is actually resolved in the + # right place. + a = TypeError + try: + i_see() + except a: + pass + #! 
5 type-error-too-few-arguments + i_see() diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_api/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/test_api/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_api/test_analysis.py b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_analysis.py new file mode 100644 index 000000000..64f9c22e1 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_analysis.py @@ -0,0 +1,6 @@ +def test_issue436(Script): + code = "bar = 0\nbar += 'foo' + 4" + errors = set(repr(e) for e in Script(code)._analysis()) + assert len(errors) == 2 + assert '' in errors + assert '' in errors diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_api/test_api.py b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_api.py new file mode 100644 index 000000000..55feaf8bb --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_api.py @@ -0,0 +1,405 @@ +""" +Test all things related to the ``jedi.api`` module. +""" + +import os +from textwrap import dedent + +import pytest +from pytest import raises +from parso import cache + +from jedi import preload_module +from jedi.inference.gradual import typeshed +from test.helpers import test_dir, get_example_dir + + +def test_preload_modules(): + def check_loaded(*module_names): + for grammar_cache in cache.parser_cache.values(): + if None in grammar_cache: + break + # Filter the typeshed parser cache. + typeshed_cache_count = sum( + 1 for path in grammar_cache + if path is not None and str(path).startswith(str(typeshed.TYPESHED_PATH)) + ) + # +1 for None module (currently used) + assert len(grammar_cache) - typeshed_cache_count == len(module_names) + 1 + for i in module_names: + assert [i in str(k) for k in grammar_cache.keys() if k is not None] + + old_cache = cache.parser_cache.copy() + cache.parser_cache.clear() + + try: + preload_module('sys') + check_loaded() # compiled (c_builtin) modules shouldn't be in the cache. 
+ preload_module('types', 'token') + check_loaded('types', 'token') + finally: + cache.parser_cache.update(old_cache) + + +def test_empty_script(Script): + assert Script('') + + +def test_line_number_errors(Script): + """ + Script should raise a ValueError if line/column numbers are not in a + valid range. + """ + s = 'hello' + # lines + with raises(ValueError): + Script(s).complete(2, 0) + with raises(ValueError): + Script(s).complete(0, 0) + + # columns + with raises(ValueError): + Script(s).infer(1, len(s) + 1) + with raises(ValueError): + Script(s).goto(1, -1) + + # ok + Script(s).get_signatures(1, 0) + Script(s).get_references(1, len(s)) + + +def _check_number(Script, source, result='float'): + completions = Script(source).complete() + assert completions[0].parent().name == result + + +def test_completion_on_number_literals(Script): + # No completions on an int literal (is a float). + assert [c.name for c in Script('1. ').complete()] \ + == ['and', 'if', 'in', 'is', 'not', 'or'] + + # Multiple points after an int literal basically mean that there's a float + # and a call after that. + _check_number(Script, '1..') + _check_number(Script, '1.0.') + + # power notation + _check_number(Script, '1.e14.') + _check_number(Script, '1.e-3.') + _check_number(Script, '9e3.') + assert Script('1.e3..').complete() == [] + assert Script('1.e-13..').complete() == [] + + +def test_completion_on_hex_literals(Script): + assert Script('0x1..').complete() == [] + _check_number(Script, '0x1.', 'int') # hexdecimal + # Completing binary literals doesn't work if they are not actually binary + # (invalid statements). 
+ assert Script('0b2.b').complete() == [] + _check_number(Script, '0b1.', 'int') # binary + + _check_number(Script, '0x2e.', 'int') + _check_number(Script, '0xE7.', 'int') + _check_number(Script, '0xEa.', 'int') + # theoretically, but people can just check for syntax errors: + assert Script('0x.').complete() == [] + + +def test_completion_on_complex_literals(Script): + assert Script('1j..').complete() == [] + _check_number(Script, '1j.', 'complex') + _check_number(Script, '44.j.', 'complex') + _check_number(Script, '4.0j.', 'complex') + # No dot no completion - I thought, but 4j is actually a literal after + # which a keyword like or is allowed. Good times, haha! + # However this has been disabled again, because it apparently annoyed + # users. So no completion after j without a space :) + assert not Script('4j').complete() + assert ({c.name for c in Script('4j ').complete()} + == {'if', 'and', 'in', 'is', 'not', 'or'}) + + +def test_goto_non_name(Script, environment): + assert Script('for').goto() == [] + + assert Script('assert').goto() == [] + assert Script('True').goto() == [] + + +def test_infer_on_non_name(Script): + assert Script('import x').infer(column=0) == [] + + +def test_infer_on_generator(Script, environment): + script = Script('def x(): yield 1\ny=x()\ny') + def_, = script.infer() + assert def_.name == 'Generator' + def_, = script.infer(only_stubs=True) + assert def_.name == 'Generator' + + +def test_goto_definition_not_multiple(Script): + """ + There should be only one result if it leads back to the same + origin (e.g. 
instance method) + """ + + s = dedent('''\ + import random + class A(): + def __init__(self, a): + self.a = 3 + + def foo(self): + pass + + if random.randint(0, 1): + a = A(2) + else: + a = A(1) + a''') + assert len(Script(s).infer()) == 1 + + +def test_reference_description(Script): + descs = [u.description for u in Script("foo = ''; foo").get_references()] + assert set(descs) == {"foo = ''", 'foo'} + + +def test_get_line_code(Script): + def get_line_code(source, line=None, **kwargs): + # On Windows replace \r + return Script(source).complete(line=line)[0].get_line_code(**kwargs).replace('\r', '') + + # On builtin + assert get_line_code('abs') == 'def abs(__x: SupportsAbs[_T]) -> _T: ...\n' + + # On custom code + first_line = 'def foo():\n' + line = ' foo' + code = first_line + line + assert get_line_code(code) == first_line + + # With before/after + code = code + '\nother_line' + assert get_line_code(code, line=2) == first_line + assert get_line_code(code, line=2, after=1) == first_line + line + '\n' + assert get_line_code(code, line=2, after=2, before=1) == code + # Should just be the whole thing, since there are no more lines on both + # sides. 
+ assert get_line_code(code, line=2, after=3, before=3) == code + + +def test_get_line_code_on_builtin(Script, disable_typeshed): + abs_ = Script('abs').complete()[0] + assert abs_.name == 'abs' + assert abs_.get_line_code() == '' + assert abs_.line is None + + +def test_goto_follow_imports(Script): + code = dedent(""" + import inspect + inspect.isfunction""") + definition, = Script(code).goto(column=0, follow_imports=True) + assert definition.module_path.name == 'inspect.py' + assert (definition.line, definition.column) == (1, 0) + + definition, = Script(code).goto(follow_imports=True) + assert definition.module_path.name == 'inspect.py' + assert (definition.line, definition.column) > (1, 0) + + code = '''def param(p): pass\nparam(1)''' + start_pos = 1, len('def param(') + + script = Script(code) + definition, = script.goto(*start_pos, follow_imports=True) + assert (definition.line, definition.column) == start_pos + assert definition.name == 'p' + result, = definition.goto() + assert result.name == 'p' + result, = definition.infer() + assert result.name == 'int' + result, = result.infer() + assert result.name == 'int' + + definition, = script.goto(*start_pos) + assert (definition.line, definition.column) == start_pos + + d, = Script('a = 1\na').goto(follow_imports=True) + assert d.name == 'a' + + +def test_goto_module(Script): + def check(line, expected, follow_imports=False): + script = Script(path=path) + module, = script.goto(line=line, follow_imports=follow_imports) + assert module.module_path == expected + + base_path = get_example_dir('simple_import') + path = base_path.joinpath('__init__.py') + + check(1, base_path.joinpath('module.py')) + check(1, base_path.joinpath('module.py'), follow_imports=True) + check(5, base_path.joinpath('module2.py')) + + +def test_goto_definition_cursor(Script): + + s = ("class A():\n" + " def _something(self):\n" + " return\n" + " def different_line(self,\n" + " b):\n" + " return\n" + "A._something\n" + "A.different_line" + ) + 
+ in_name = 2, 9 + under_score = 2, 8 + cls = 2, 7 + should1 = 7, 10 + diff_line = 4, 10 + should2 = 8, 10 + + def get_def(pos): + return [d.description for d in Script(s).infer(*pos)] + + in_name = get_def(in_name) + under_score = get_def(under_score) + should1 = get_def(should1) + should2 = get_def(should2) + + diff_line = get_def(diff_line) + + assert should1 == in_name + assert should1 == under_score + + assert should2 == diff_line + + assert get_def(cls) == [] + + +def test_no_statement_parent(Script): + source = dedent(""" + def f(): + pass + + class C: + pass + + variable = f if random.choice([0, 1]) else C""") + defs = Script(source).infer(column=3) + defs = sorted(defs, key=lambda d: d.line) + assert [d.description for d in defs] == ['def f', 'class C'] + + +def test_backslash_continuation_and_bracket(Script): + code = dedent(r""" + x = 0 + a = \ + [1, 2, 3, (x)]""") + + lines = code.splitlines() + column = lines[-1].index('(') + def_, = Script(code).infer(line=len(lines), column=column) + assert def_.name == 'int' + + +def test_goto_follow_builtin_imports(Script): + s = Script('import sys; sys') + d, = s.goto(follow_imports=True) + assert d.in_builtin_module() is True + d, = s.goto(follow_imports=True, follow_builtin_imports=True) + assert d.in_builtin_module() is True + + +def test_docstrings_for_completions(Script): + for c in Script('').complete(): + assert isinstance(c.docstring(), str) + + +def test_fuzzy_completion(Script): + script = Script('string = "hello"\nstring.upper') + assert ['isupper', + 'upper'] == [comp.name for comp in script.complete(fuzzy=True)] + + +def test_math_fuzzy_completion(Script, environment): + script = Script('import math\nmath.og') + expected = ['copysign', 'log', 'log10', 'log1p', 'log2'] + completions = script.complete(fuzzy=True) + assert expected == [comp.name for comp in completions] + for c in completions: + assert c.complete is None + + +def test_file_fuzzy_completion(Script): + path = os.path.join(test_dir, 
'completion') + script = Script('"{}/ep08_i'.format(path)) + expected = [ + 'pep0484_basic.py"', + 'pep0484_generic_mismatches.py"', + 'pep0484_generic_parameters.py"', + 'pep0484_generic_passthroughs.py"', + 'pep0484_typing.py"', + ] + assert expected == [comp.name for comp in script.complete(fuzzy=True)] + + +@pytest.mark.parametrize( + 'code, column', [ + ('"foo"', 0), + ('"foo"', 3), + ('"foo"', None), + ('"""foo"""', 5), + ('"""foo"""', 1), + ('"""foo"""', 2), + ] +) +def test_goto_on_string(Script, code, column): + script = Script(code) + assert not script.infer(column=column) + assert not script.goto(column=column) + + +def test_multi_goto(Script): + script = Script('x = 1\ny = 1.0\nx\ny') + x, = script.goto(line=3) + y, = script.goto(line=4) + assert x.line == 1 + assert y.line == 2 + + +@pytest.mark.parametrize( + 'code, column, expected', [ + ('str() ', 3, 'str'), + ('str() ', 4, 'str'), + ('str() ', 5, 'str'), + ('str() ', 6, None), + ('str( ) ', 6, None), + (' 1', 1, None), + ('str(1) ', 3, 'str'), + ('str(1) ', 4, 'int'), + ('str(1) ', 5, 'int'), + ('str(1) ', 6, 'str'), + ('str(1) ', 7, None), + ('str( 1) ', 4, 'str'), + ('str( 1) ', 5, 'int'), + ('str(+1) ', 4, 'str'), + ('str(+1) ', 5, 'int'), + ('str(1, 1.) ', 3, 'str'), + ('str(1, 1.) ', 4, 'int'), + ('str(1, 1.) ', 5, 'int'), + ('str(1, 1.) ', 6, None), + ('str(1, 1.) 
', 7, 'float'), + ] +) +def test_infer_after_parentheses(Script, code, column, expected): + completions = Script(code).infer(column=column) + if expected is None: + assert completions == [] + else: + assert [c.name for c in completions] == [expected] diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_api/test_api_classes_follow_definition.py b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_api_classes_follow_definition.py new file mode 100644 index 000000000..96108141d --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_api_classes_follow_definition.py @@ -0,0 +1,63 @@ +from os.path import join +from itertools import chain +from functools import partial + +import jedi +from ..helpers import test_dir + + +def test_import_empty(Script): + """ github #340, return the full word. """ + completion = Script("import ").complete()[0] + definition = completion.infer()[0] + assert definition + + +def check_follow_definition_types(Script, source): + # nested import + completions = Script(source, path='some_path.py').complete() + defs = chain.from_iterable(c.infer() for c in completions) + return [d.type for d in defs] + + +def test_follow_import_incomplete(Script, environment): + """ + Completion on incomplete imports should always take the full completion + to do any type inference. 
+ """ + datetime = check_follow_definition_types(Script, "import itertool") + assert datetime == ['module'] + + # empty `from * import` parts + itert = jedi.Script("from itertools import ").complete() + definitions = [d for d in itert if d.name == 'chain'] + assert len(definitions) == 1 + assert [d.type for d in definitions[0].infer()] == ['class'] + + # incomplete `from * import` part + datetime = check_follow_definition_types(Script, "from datetime import datetim") + assert set(datetime) == {'class', 'instance'} # py3: builtin and pure py version + # os.path check + ospath = check_follow_definition_types(Script, "from os.path import abspat") + assert set(ospath) == {'function'} + + # alias + alias = check_follow_definition_types(Script, "import io as abcd; abcd") + assert alias == ['module'] + + +def test_follow_definition_nested_import(Script): + Script = partial(Script, project=jedi.Project(join(test_dir, 'completion', 'import_tree'))) + types = check_follow_definition_types(Script, "import pkg.mod1; pkg") + assert types == ['module'] + + types = check_follow_definition_types(Script, "import pkg.mod1; pkg.mod1") + assert types == ['module'] + + types = check_follow_definition_types(Script, "import pkg.mod1; pkg.mod1.a") + assert types == ['instance'] + + +def test_follow_definition_land_on_import(Script): + types = check_follow_definition_types(Script, "import datetime; datetim") + assert types == ['module'] diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_api/test_call_signatures.py b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_call_signatures.py new file mode 100644 index 000000000..f7b5618a8 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_call_signatures.py @@ -0,0 +1,659 @@ +from textwrap import dedent +import inspect +from unittest import TestCase + +import pytest + +from jedi import cache +from jedi.parser_utils import get_signature +from jedi import Interpreter + + +def assert_signature(Script, source, expected_name, 
expected_index=0, line=None, column=None): + signatures = Script(source).get_signatures(line, column) + + assert len(signatures) <= 1 + + if not signatures: + assert expected_name is None, \ + 'There are no signatures, but `%s` expected.' % expected_name + else: + assert signatures[0].name == expected_name + assert signatures[0].index == expected_index + return signatures[0] + + +def test_valid_call(Script): + assert_signature(Script, 'bool()', 'bool', column=5) + + +class TestSignatures(TestCase): + @pytest.fixture(autouse=True) + def init(self, Script): + self.Script = Script + + def _run_simple(self, source, name, index=0, column=None, line=1): + assert_signature(self.Script, source, name, index, line, column) + + def test_simple(self): + run = self._run_simple + + # simple + s1 = "tuple(a, bool(" + run(s1, 'tuple', 0, 6) + run(s1, 'tuple', None, 8) + run(s1, 'tuple', None, 9) + run(s1, 'bool', 0, 14) + + s2 = "abs(), " + run(s2, 'abs', 0, 4) + run(s2, None, column=5) + run(s2, None) + + s3 = "abs()." 
+ run(s3, None, column=5) + run(s3, None) + + def test_more_complicated(self): + run = self._run_simple + + s4 = 'abs(bool(), , set,' + run(s4, None, column=3) + run(s4, 'abs', 0, 4) + run(s4, 'bool', 0, 9) + run(s4, 'abs', 0, 10) + run(s4, 'abs', None, 11) + + s5 = "tuple(1,\nif 2:\n def a():" + run(s5, 'tuple', 0, 6) + run(s5, 'tuple', None, 8) + + s6 = "bool().__eq__(" + run(s6, '__eq__', 0) + run(s6, 'bool', 0, 5) + + s7 = "str().upper().center(" + s8 = "bool(int[abs(" + run(s7, 'center', 0) + run(s8, 'abs', 0) + run(s8, 'bool', 0, 10) + + run("import time; abc = time; abc.sleep(", 'sleep', 0) + + def test_issue_57(self): + # jedi #57 + s = "def func(alpha, beta): pass\n" \ + "func(alpha='101'," + self._run_simple(s, 'func', 0, column=13, line=2) + + def test_for(self): + # jedi-vim #11 + self._run_simple("for tuple(", 'tuple', 0) + self._run_simple("for s in tuple(", 'tuple', 0) + + +def test_with(Script): + # jedi-vim #9 + sigs = Script("with open(").get_signatures() + assert sigs + assert all(sig.name == 'open' for sig in sigs) + + +def test_get_signatures_empty_parentheses_pre_space(Script): + s = dedent("""\ + def f(a, b): + pass + f( )""") + assert_signature(Script, s, 'f', 0, line=3, column=3) + + +def test_multiple_signatures(Script): + s = dedent("""\ + if x: + def f(a, b): + pass + else: + def f(a, b): + pass + f(""") + assert len(Script(s).get_signatures()) == 2 + + +def test_get_signatures_whitespace(Script): + # note: trailing space after 'abs' + s = 'abs( \ndef x():\n pass\n' + assert_signature(Script, s, 'abs', 0, line=1, column=5) + + +def test_decorator_in_class(Script): + """ + There's still an implicit param, with a decorator. + Github issue #319. 
+ """ + s = dedent("""\ + def static(func): + def wrapped(obj, *args): + return f(type(obj), *args) + return wrapped + + class C(object): + @static + def test(cls): + return 10 + + C().test(""") + + signatures = Script(s).get_signatures() + assert len(signatures) == 1 + x = [p.description for p in signatures[0].params] + assert x == ['param *args'] + + +def test_additional_brackets(Script): + assert_signature(Script, 'abs((', 'abs', 0) + + +def test_unterminated_strings(Script): + assert_signature(Script, 'abs(";', 'abs', 0) + + +def test_whitespace_before_bracket(Script): + assert_signature(Script, 'abs (', 'abs', 0) + assert_signature(Script, 'abs (";', 'abs', 0) + assert_signature(Script, 'abs\n(', None) + + +def test_brackets_in_string_literals(Script): + assert_signature(Script, 'abs (" (', 'abs', 0) + assert_signature(Script, 'abs (" )', 'abs', 0) + + +def test_function_definitions_should_break(Script): + """ + Function definitions (and other tokens that cannot exist within call + signatures) should break and not be able to return a signature. 
+ """ + assert_signature(Script, 'abs(\ndef x', 'abs', 0) + assert not Script('abs(\ndef x(): pass').get_signatures() + + +def test_flow_call(Script): + assert not Script('if (1').get_signatures() + + +def test_chained_calls(Script): + source = dedent(''' + class B(): + def test2(self, arg): + pass + + class A(): + def test1(self): + return B() + + A().test1().test2(''') + + assert_signature(Script, source, 'test2', 0) + + +def test_return(Script): + source = dedent(''' + def foo(): + return '.'.join()''') + + assert_signature(Script, source, 'join', 0, column=len(" return '.'.join(")) + + +def test_find_signature_on_module(Script): + """github issue #240""" + s = 'import datetime; datetime(' + # just don't throw an exception (if numpy doesn't exist, just ignore it) + assert Script(s).get_signatures() == [] + + +def test_complex(Script, environment): + s = """ + def abc(a,b): + pass + + def a(self): + abc( + + if 1: + pass + """ + assert_signature(Script, s, 'abc', 0, line=6, column=20) + s = """ + import re + def huhu(it): + re.compile( + return it * 2 + """ + sig1, sig2 = sorted(Script(s).get_signatures(line=4, column=27), key=lambda s: s.line) + assert sig1.name == sig2.name == 'compile' + assert sig1.index == sig2.index == 0 + func1, = sig1._name.infer() + func2, = sig2._name.infer() + + # Do these checks just for Python 3, I'm too lazy to deal with this + # legacy stuff. ~ dave. + assert get_signature(func1.tree_node) \ + == 'compile(pattern: AnyStr, flags: _FlagsType = ...) -> Pattern[AnyStr]' + assert get_signature(func2.tree_node) \ + == 'compile(pattern: Pattern[AnyStr], flags: _FlagsType = ...) 
->\nPattern[AnyStr]' + + # jedi-vim #70 + s = """def foo(""" + assert Script(s).get_signatures() == [] + + # jedi-vim #116 + s = """import itertools; test = getattr(itertools, 'chain'); test(""" + assert_signature(Script, s, 'chain', 0) + + +def _params(Script, source, line=None, column=None): + signatures = Script(source).get_signatures(line, column) + assert len(signatures) == 1 + return signatures[0].params + + +def test_int_params(Script): + sig1, sig2 = Script('int(').get_signatures() + # int is defined as: `int(x[, base])` + assert len(sig1.params) == 1 + assert sig1.params[0].name == 'x' + assert len(sig2.params) == 2 + assert sig2.params[0].name == 'x' + assert sig2.params[1].name == 'base' + + +def test_pow_params(Script): + # See Github #1357. + for sig in Script('pow(').get_signatures(): + param_names = [p.name for p in sig.params] + assert param_names in (['base', 'exp'], ['base', 'exp', 'mod']) + + +def test_param_name(Script): + sigs = Script('open(something,').get_signatures() + for sig in sigs: + # All of the signatures (in Python the function is overloaded), + # contain the same param names. + assert sig.params[0].name in ['file', 'name'] + assert sig.params[1].name == 'mode' + assert sig.params[2].name == 'buffering' + + +def test_builtins(Script): + """ + The self keyword should be visible even for builtins, if not + instantiated. + """ + p = _params(Script, 'str.endswith(') + assert p[0].name == 'self' + assert p[1].name == 'suffix' + p = _params(Script, 'str().endswith(') + assert p[0].name == 'suffix' + + +def test_signature_is_definition(Script): + """ + Through inheritance, a signature is a sub class of Name. + Check if the attributes match. + """ + s = """class Spam(): pass\nSpam""" + signature = Script(s + '(').get_signatures()[0] + definition = Script(s + '(').infer(column=0)[0] + signature.line == 1 + signature.column == 6 + + # Now compare all the attributes that a Signature must also have. 
+ for attr_name in dir(definition): + dont_scan = ['defined_names', 'parent', 'goto_assignments', 'infer', + 'params', 'get_signatures', 'execute', 'goto', + 'desc_with_module'] + if attr_name.startswith('_') or attr_name in dont_scan: + continue + + attribute = getattr(definition, attr_name) + signature_attribute = getattr(signature, attr_name) + if inspect.ismethod(attribute): + assert attribute() == signature_attribute() + else: + assert attribute == signature_attribute + + +def test_no_signature(Script): + # str doesn't have a __call__ method + assert Script('str()(').get_signatures() == [] + + s = dedent("""\ + class X(): + pass + X()(""") + assert Script(s).get_signatures() == [] + assert len(Script(s).get_signatures(column=2)) == 1 + assert Script('').get_signatures() == [] + + +def test_dict_literal_in_incomplete_call(Script): + source = """\ + import json + + def foo(): + json.loads( + + json.load.return_value = {'foo': [], + 'bar': True} + + c = Foo() + """ + + script = Script(dedent(source)) + assert script.get_signatures(line=4, column=15) + + +def test_completion_interference(Script): + """Seems to cause problems, see also #396.""" + cache.parser_cache.pop(None, None) + assert Script('open(').get_signatures() + + # complete something usual, before doing the same get_signatures again. 
+ assert Script('from datetime import ').complete() + + assert Script('open(').get_signatures() + + +def test_keyword_argument_index(Script, environment): + def get(source, column=None): + return Script(source).get_signatures(column=column)[0] + + assert get('sorted([], key=a').index == 1 + assert get('sorted([], key=').index == 1 + assert get('sorted([], no_key=a').index is None + + kw_func = 'def foo(a, b): pass\nfoo(b=3, a=4)' + assert get(kw_func, column=len('foo(b')).index == 0 + assert get(kw_func, column=len('foo(b=')).index == 1 + assert get(kw_func, column=len('foo(b=3, a=')).index == 0 + + kw_func_simple = 'def foo(a, b): pass\nfoo(b=4)' + assert get(kw_func_simple, column=len('foo(b')).index == 0 + assert get(kw_func_simple, column=len('foo(b=')).index == 1 + + args_func = 'def foo(*kwargs): pass\n' + assert get(args_func + 'foo(a').index == 0 + assert get(args_func + 'foo(a, b').index == 0 + + kwargs_func = 'def foo(**kwargs): pass\n' + assert get(kwargs_func + 'foo(a=2').index == 0 + assert get(kwargs_func + 'foo(a=2, b=2').index == 0 + + both = 'def foo(*args, **kwargs): pass\n' + assert get(both + 'foo(a=2').index == 1 + assert get(both + 'foo(a=2, b=2').index == 1 + assert get(both + 'foo(a=2, b=2)', column=len('foo(b=2, a=2')).index == 1 + assert get(both + 'foo(a, b, c').index == 0 + + +code1 = 'def f(u, /, v=3, *, abc, abd, xyz): pass' +code2 = 'def g(u, /, v=3, *, abc, abd, xyz, **kwargs): pass' +code3 = 'def h(u, /, v, *args, x=1, y): pass' +code4 = 'def i(u, /, v, *args, x=1, y, **kwargs): pass' + + +_calls = [ + # No *args, **kwargs + (code1, 'f(', 0), + (code1, 'f(a', 0), + (code1, 'f(a,', 1), + (code1, 'f(a,b', 1), + (code1, 'f(a,b,', 2), + (code1, 'f(a,b,c', None), + (code1, 'f(a,b,a', 2), + (code1, 'f(a,b,a=', None), + (code1, 'f(a,b,abc', 2), + (code1, 'f(a,b,abc=(', 2), + (code1, 'f(a,b,abc=(f,1,2,3', 2), + (code1, 'f(a,b,abd', 3), + (code1, 'f(a,b,x', 4), + (code1, 'f(a,b,xy', 4), + (code1, 'f(a,b,xyz=', 4), + (code1, 'f(a,b,xy=', 
None), + (code1, 'f(u=', (0, None)), + (code1, 'f(v=', 1), + + # **kwargs + (code2, 'g(a,b,a', 2), + (code2, 'g(a,b,abc', 2), + (code2, 'g(a,b,abd', 3), + (code2, 'g(a,b,arr', 5), + (code2, 'g(a,b,xy', 4), + (code2, 'g(a,b,xyz=', 4), + (code2, 'g(a,b,xy=', 5), + (code2, 'g(a,b,abc=1,abd=4,', 4), + (code2, 'g(a,b,abc=1,xyz=3,abd=4,', 5), + (code2, 'g(a,b,abc=1,abd=4,lala', 5), + (code2, 'g(a,b,abc=1,abd=4,lala=', 5), + (code2, 'g(a,b,abc=1,abd=4,abd=', 5), + (code2, 'g(a,b,kw', 5), + (code2, 'g(a,b,kwargs=', 5), + (code2, 'g(u=', (0, 5)), + (code2, 'g(v=', 1), + + # *args + (code3, 'h(a,b,c', 2), + (code3, 'h(a,b,c,', 2), + (code3, 'h(a,b,c,d', 2), + (code3, 'h(a,b,c,d[', 2), + (code3, 'h(a,b,c,(3,', 2), + (code3, 'h(a,b,c,(3,)', 2), + (code3, 'h(a,b,args=', None), + (code3, 'h(u,v=', 1), + (code3, 'h(u=', (0, None)), + (code3, 'h(u,*xxx', 1), + (code3, 'h(u,*xxx,*yyy', 1), + (code3, 'h(u,*[]', 1), + (code3, 'h(u,*', 1), + (code3, 'h(u,*, *', 1), + (code3, 'h(u,1,**', 3), + (code3, 'h(u,**y', 1), + (code3, 'h(u,x=2,**', 1), + (code3, 'h(u,x=2,**y', 1), + (code3, 'h(u,v=2,**y', 3), + (code3, 'h(u,x=2,**vv', 1), + + # *args, **kwargs + (code4, 'i(a,b,c,d', 2), + (code4, 'i(a,b,c,d,e', 2), + (code4, 'i(a,b,c,d,e=', 5), + (code4, 'i(a,b,c,d,e=3', 5), + (code4, 'i(a,b,c,d=,x=', 3), + (code4, 'i(a,b,c,d=5,x=4', 3), + (code4, 'i(a,b,c,d=5,x=4,y', 4), + (code4, 'i(a,b,c,d=5,x=4,y=3,', 5), + (code4, 'i(a,b,c,d=5,y=4,x=3,', 5), + (code4, 'i(a,b,c,d=4,', 3), + (code4, 'i(a,b,c,x=1,d=,', 4), + + # Error nodes + (code4, 'i(1, [a,b', 1), + (code4, 'i(1, [a,b=,', 2), + (code4, 'i(1, [a?b,', 2), + (code4, 'i(1, [a?b,*', 2), + (code4, 'i(?b,*r,c', 1), + (code4, 'i(?*', 0), + (code4, 'i(?**', (0, 1)), + + # Random + (code4, 'i(()', 0), + (code4, 'i((),', 1), + (code4, 'i([(),', 0), + (code4, 'i([(,', 1), + (code4, 'i(x,()', 1), +] + + +@pytest.mark.parametrize('ending', ['', ')']) +@pytest.mark.parametrize('code, call, expected_index', _calls) +def test_signature_index(Script, 
environment, code, call, expected_index, ending): + if isinstance(expected_index, tuple): + expected_index = expected_index[environment.version_info > (3, 8)] + if environment.version_info < (3, 8): + code = code.replace('/,', '') + + sig, = Script(code + '\n' + call + ending).get_signatures(column=len(call)) + index = sig.index + assert expected_index == index + + +@pytest.mark.parametrize('code', ['foo', 'instance.foo']) +def test_arg_defaults(Script, environment, code): + def foo(arg="bla", arg1=1): + pass + + class Klass: + def foo(self, arg="bla", arg1=1): + pass + + instance = Klass() + + src = dedent(""" + def foo2(arg="bla", arg1=1): + pass + + class Klass2: + def foo2(self, arg="bla", arg1=1): + pass + + instance = Klass2() + """) + + executed_locals = dict() + exec(src, None, executed_locals) + locals_ = locals() + + def iter_scripts(): + yield Interpreter(code + '(', namespaces=[locals_]) + yield Script(src + code + "2(") + yield Interpreter(code + '2(', namespaces=[executed_locals]) + + for script in iter_scripts(): + signatures = script.get_signatures() + assert signatures[0].params[0].description in ('param arg="bla"', "param arg='bla'") + assert signatures[0].params[1].description == 'param arg1=1' + + +def test_bracket_start(Script): + def bracket_start(src): + signatures = Script(src).get_signatures() + assert len(signatures) == 1 + return signatures[0].bracket_start + + assert bracket_start('abs(') == (1, 3) + + +def test_different_caller(Script): + """ + It's possible to not use names, but another function result or an array + index and then get the signature of it. 
+ """ + + assert_signature(Script, '[abs][0](', 'abs', 0) + assert_signature(Script, '[abs][0]()', 'abs', 0, column=len('[abs][0](')) + + assert_signature(Script, '(abs)(', 'abs', 0) + assert_signature(Script, '(abs)()', 'abs', 0, column=len('(abs)(')) + + +def test_in_function(Script): + code = dedent('''\ + class X(): + @property + def func(''') + assert not Script(code).get_signatures() + + +def test_lambda_params(Script): + code = dedent('''\ + my_lambda = lambda x: x+1 + my_lambda(1)''') + sig, = Script(code).get_signatures(column=11) + assert sig.index == 0 + assert sig.name == '' + assert [p.name for p in sig.params] == ['x'] + + +CLASS_CODE = dedent('''\ +class X(): + def __init__(self, foo, bar): + self.foo = foo +''') + + +def test_class_creation(Script): + + sig, = Script(CLASS_CODE + 'X(').get_signatures() + assert sig.index == 0 + assert sig.name == 'X' + assert [p.name for p in sig.params] == ['foo', 'bar'] + + +def test_call_init_on_class(Script): + sig, = Script(CLASS_CODE + 'X.__init__(').get_signatures() + assert [p.name for p in sig.params] == ['self', 'foo', 'bar'] + + +def test_call_init_on_instance(Script): + sig, = Script(CLASS_CODE + 'X().__init__(').get_signatures() + assert [p.name for p in sig.params] == ['foo', 'bar'] + + +def test_call_magic_method(Script): + code = dedent('''\ + class X(): + def __call__(self, baz): + pass + ''') + sig, = Script(code + 'X()(').get_signatures() + assert sig.index == 0 + assert sig.name == 'X' + assert [p.name for p in sig.params] == ['baz'] + + sig, = Script(code + 'X.__call__(').get_signatures() + assert [p.name for p in sig.params] == ['self', 'baz'] + sig, = Script(code + 'X().__call__(').get_signatures() + assert [p.name for p in sig.params] == ['baz'] + + +@pytest.mark.parametrize('column', [6, 9]) +def test_cursor_after_signature(Script, column): + source = dedent(""" + def foo(*args): + pass + foo() # _ + """) + + script = Script(source) + + assert not script.get_signatures(4, column) + + 
+@pytest.mark.parametrize( + 'code, line, column, name, index', [ + ('abs(()\ndef foo(): pass', 1, None, 'abs', 0), + ('abs(chr() \ndef foo(): pass', 1, 10, 'abs', 0), + ('abs(chr()\ndef foo(): pass', 1, None, 'abs', 0), + ('abs(chr()\ndef foo(): pass', 1, 8, 'chr', 0), + ('abs(chr()\ndef foo(): pass', 1, 7, 'abs', 0), + ('abs(chr ( \nclass y: pass', 1, None, 'chr', 0), + ('abs(chr ( \nclass y: pass', 1, 8, 'abs', 0), + ('abs(chr ( \nclass y: pass', 1, 9, 'abs', 0), + ('abs(chr ( \nclass y: pass', 1, 10, 'chr', 0), + ] +) +def test_base_signatures(Script, code, line, column, name, index): + sig, = Script(code).get_signatures(line=line, column=column) + + assert sig.name == name + assert sig.index == index diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_api/test_classes.py b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_classes.py new file mode 100644 index 000000000..cb99548b8 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_classes.py @@ -0,0 +1,668 @@ +""" Test all things related to the ``jedi.api_classes`` module. +""" + +from textwrap import dedent +from inspect import cleandoc + +import pytest + +import jedi +from jedi import __doc__ as jedi_doc +from jedi.inference.compiled import CompiledValueName +from ..helpers import get_example_dir + + +def test_is_keyword(Script): + results = Script('str', path=None).infer(1, 1) + assert len(results) == 1 and results[0].is_keyword is False + + +def test_basedefinition_type(Script, get_names): + def make_definitions(): + """ + Return a list of definitions for parametrized tests. 
+ + :rtype: [jedi.api_classes.BaseName] + """ + source = dedent(""" + import sys + + class C: + pass + + x = C() + + def f(): + pass + + def g(): + yield + + h = lambda: None + """) + + definitions = [] + definitions += get_names(source) + + source += dedent(""" + variable = sys or C or x or f or g or g() or h""") + lines = source.splitlines() + script = Script(source, path=None) + definitions += script.infer(len(lines), len('variable')) + + script2 = Script(source, path=None) + definitions += script2.get_references(4, len('class C')) + + source_param = "def f(a): return a" + script_param = Script(source_param, path=None) + definitions += script_param.goto(1, len(source_param)) + + return definitions + + for definition in make_definitions(): + assert definition.type in ('module', 'class', 'instance', 'function', + 'generator', 'statement', 'import', 'param') + + +@pytest.mark.parametrize( + ('src', 'expected_result', 'column'), [ + # import one level + ('import t', 'module', None), + ('import ', 'module', None), + ('import datetime; datetime', 'module', None), + + # from + ('from datetime import timedelta', 'class', None), + ('from datetime import timedelta; timedelta', 'class', None), + ('from json import tool', 'module', None), + ('from json import tool; tool', 'module', None), + + # import two levels + ('import json.tool; json', 'module', None), + ('import json.tool; json.tool', 'module', None), + ('import json.tool; json.tool.main', 'function', None), + ('import json.tool', 'module', None), + ('import json.tool', 'module', 9), + ] + +) +def test_basedefinition_type_import(Script, src, expected_result, column): + types = {t.type for t in Script(src).complete(column=column)} + assert types == {expected_result} + + +def test_function_signature_in_doc(Script): + defs = Script(""" + def f(x, y=1, z='a'): + pass + f""").infer() + doc = defs[0].docstring() + assert "f(x, y=1, z='a')" in str(doc) + + +def test_param_docstring(get_names): + param = get_names("def 
test(parameter): pass", all_scopes=True)[1] + assert param.name == 'parameter' + assert param.docstring() == '' + + +def test_class_signature(Script): + defs = Script(""" + class Foo: + def __init__(self, x, y=1, z='a'): + pass + Foo""").infer() + doc = defs[0].docstring() + assert doc == "Foo(x, y=1, z='a')" + + +def test_position_none_if_builtin(Script): + gotos = Script('import sys; sys.path').goto() + assert gotos[0].in_builtin_module() + assert gotos[0].line is not None + assert gotos[0].column is not None + + +def test_completion_docstring(Script, jedi_path): + """ + Jedi should follow imports in certain conditions + """ + def docstr(src, result): + c = Script(src, project=project).complete()[0] + assert c.docstring(raw=True, fast=False) == cleandoc(result) + + project = jedi.Project('.', sys_path=[jedi_path]) + c = Script('import jedi\njed', project=project).complete()[0] + assert c.docstring(fast=False) == cleandoc(jedi_doc) + + docstr('import jedi\njedi.Scr', cleandoc(jedi.Script.__doc__)) + + docstr('abcd=3;abcd', '') + docstr('"hello"\nabcd=3\nabcd', '') + docstr( + dedent(''' + def x(): + "hello" + 0 + x'''), + 'hello' + ) + docstr( + dedent(''' + def x(): + "hello";0 + x'''), + 'hello' + ) + # Shouldn't work with a tuple. + docstr( + dedent(''' + def x(): + "hello",0 + x'''), + '' + ) + # Should also not work if we rename something. 
+ docstr( + dedent(''' + def x(): + "hello" + y = x + y'''), + '' + ) + + +def test_completion_params(Script): + c = Script('import string; string.capwords').complete()[0] + assert [p.name for p in c.get_signatures()[0].params] == ['s', 'sep'] + + +def test_functions_should_have_params(Script): + for c in Script('bool.').complete(): + if c.type == 'function': + if c.name in ('denominator', 'numerator', 'imag', 'real', '__class__'): + # Properties + assert not c.get_signatures() + else: + assert c.get_signatures() + + +def test_hashlib_params(Script, environment): + if environment.version_info < (3,): + pytest.skip() + + script = Script('from hashlib import sha256') + c, = script.complete() + sig, = c.get_signatures() + assert [p.name for p in sig.params] == ['string'] + + +def test_signature_params(Script): + def check(defs): + signature, = defs[0].get_signatures() + assert len(signature.params) == 1 + assert signature.params[0].name == 'bar' + + s = dedent(''' + def foo(bar): + pass + foo''') + + check(Script(s).infer()) + + check(Script(s).goto()) + check(Script(s + '\nbar=foo\nbar').goto()) + + +def test_param_endings(Script): + """ + Params should be represented without the comma and whitespace they have + around them. 
+ """ + sig, = Script('def x(a, b=5, c=""): pass\n x(').get_signatures() + assert [p.description for p in sig.params] == ['param a', 'param b=5', 'param c=""'] + + +@pytest.mark.parametrize( + 'code, index, name, is_definition', [ + ('name', 0, 'name', False), + ('a = f(x)', 0, 'a', True), + ('a = f(x)', 1, 'f', False), + ('a = f(x)', 2, 'x', False), + ] +) +def test_is_definition(get_names, code, index, name, is_definition): + d = get_names( + dedent(code), + references=True, + all_scopes=True, + )[index] + assert d.name == name + assert d.is_definition() == is_definition + + +@pytest.mark.parametrize( + 'code, expected', ( + ('import x as a', [False, True]), + ('from x import y', [False, True]), + ('from x.z import y', [False, False, True]), + ) +) +def test_is_definition_import(get_names, code, expected): + ns = get_names(dedent(code), references=True, all_scopes=True) + # Assure that names are definitely sorted. + ns = sorted(ns, key=lambda name: (name.line, name.column)) + assert [name.is_definition() for name in ns] == expected + + +def test_parent(Script): + def _parent(source, line=None, column=None): + def_, = Script(dedent(source)).goto(line, column) + return def_.parent() + + parent = _parent('foo=1\nfoo') + assert parent.type == 'module' + + parent = _parent(''' + def spam(): + if 1: + y=1 + y''') + assert parent.name == 'spam' + assert parent.parent().type == 'module' + + +def test_parent_on_function(Script): + code = 'def spam():\n pass' + def_, = Script(code).goto(line=1, column=len('def spam')) + parent = def_.parent() + assert parent.name == '__main__' + assert parent.type == 'module' + + +def test_parent_on_completion_and_else(Script): + script = Script(dedent('''\ + class Foo(): + def bar(name): name + Foo().bar''')) + + bar, = script.complete() + parent = bar.parent() + assert parent.name == 'Foo' + assert parent.type == 'class' + + param, name, = [d for d in script.get_names(all_scopes=True, references=True) + if d.name == 'name'] + parent = 
name.parent() + assert parent.name == 'bar' + assert parent.type == 'function' + parent = name.parent().parent() + assert parent.name == 'Foo' + assert parent.type == 'class' + + parent = param.parent() + assert parent.name == 'bar' + assert parent.type == 'function' + parent = param.parent().parent() + assert parent.name == 'Foo' + assert parent.type == 'class' + + parent = Script('str.join').complete()[0].parent() + assert parent.name == 'str' + assert parent.type == 'class' + + +def test_parent_on_closure(Script): + script = Script(dedent('''\ + class Foo(): + def bar(name): + def inner(): foo + return inner''')) + + names = script.get_names(all_scopes=True, references=True) + inner_func, inner_reference = filter(lambda d: d.name == 'inner', names) + foo, = filter(lambda d: d.name == 'foo', names) + + assert foo.parent().name == 'inner' + assert foo.parent().parent().name == 'bar' + assert foo.parent().parent().parent().name == 'Foo' + assert foo.parent().parent().parent().parent().name == '__main__' + + assert inner_func.parent().name == 'bar' + assert inner_func.parent().parent().name == 'Foo' + assert inner_reference.parent().name == 'bar' + assert inner_reference.parent().parent().name == 'Foo' + + +def test_parent_on_comprehension(Script): + ns = Script('''\ + def spam(): + return [i for i in range(5)] + ''').get_names(all_scopes=True) + + assert [name.name for name in ns] == ['spam', 'i'] + + assert ns[0].parent().name == '__main__' + assert ns[0].parent().type == 'module' + assert ns[1].parent().name == 'spam' + assert ns[1].parent().type == 'function' + + +def test_type(Script): + for c in Script('a = [str()]; a[0].').complete(): + if c.name == '__class__' and False: # TODO fix. + assert c.type == 'class' + else: + assert c.type in ('function', 'statement') + + for c in Script('list.').complete(): + assert c.type + + # Github issue #397, type should never raise an error. 
+ for c in Script('import os; os.path.').complete(): + assert c.type + + +def test_type_II(Script): + """ + GitHub Issue #833, `keyword`s are seen as `module`s + """ + for c in Script('f').complete(): + if c.name == 'for': + assert c.type == 'keyword' + + +@pytest.mark.parametrize( + 'added_code, expected_type, expected_infer_type', [ + ('Foo().x', 'property', 'instance'), + ('Foo.x', 'property', 'property'), + ('Foo().y', 'function', 'function'), + ('Foo.y', 'function', 'function'), + ('Foo().z', 'function', 'function'), + ('Foo.z', 'function', 'function'), + ] +) +def test_class_types(goto_or_help_or_infer, added_code, expected_type, + expected_infer_type): + code = dedent('''\ + class Foo: + @property + def x(self): return 1 + @staticmethod + def y(self): ... + @classmethod + def z(self): ... + ''') + d, = goto_or_help_or_infer(code + added_code) + if goto_or_help_or_infer.type == 'infer': + assert d.type == expected_infer_type + else: + assert d.type == expected_type + + +""" +This tests the BaseName.goto function, not the jedi +function. They are not really different in functionality, but really +different as an implementation. +""" + + +def test_goto_repetition(get_names): + defs = get_names('a = 1; a', references=True, definitions=False) + # Repeat on the same variable. Shouldn't change once we're on a + # definition. 
+ for _ in range(3): + assert len(defs) == 1 + ass = defs[0].goto() + assert ass[0].description == 'a = 1' + + +def test_goto_named_params(get_names): + src = """\ + def foo(a=1, bar=2): + pass + foo(bar=1) + """ + bar = get_names(dedent(src), references=True)[-1] + param = bar.goto()[0] + assert (param.line, param.column) == (1, 13) + assert param.type == 'param' + + +def test_class_call(get_names): + src = 'from threading import Thread; Thread(group=1)' + n = get_names(src, references=True)[-1] + assert n.name == 'group' + param_def = n.goto()[0] + assert param_def.name == 'group' + assert param_def.type == 'param' + + +def test_parentheses(get_names): + n = get_names('("").upper', references=True)[-1] + assert n.goto()[0].name == 'upper' + + +def test_import(get_names): + nms = get_names('from json import load', references=True) + assert nms[0].name == 'json' + assert nms[0].type == 'module' + n = nms[0].goto()[0] + assert n.name == 'json' + assert n.type == 'module' + + assert nms[1].name == 'load' + assert nms[1].type == 'function' + n = nms[1].goto()[0] + assert n.name == 'load' + assert n.type == 'function' + + nms = get_names('import os; os.path', references=True) + assert nms[0].name == 'os' + assert nms[0].type == 'module' + n = nms[0].goto()[0] + assert n.name == 'os' + assert n.type == 'module' + + nms = nms[2].goto() + assert nms + assert all(n.type == 'module' for n in nms) + assert 'posixpath' in {n.name for n in nms} + + nms = get_names('import os.path', references=True) + n = nms[0].goto()[0] + assert n.name == 'os' + assert n.type == 'module' + n = nms[1].goto()[0] + # This is very special, normally the name doesn't change, but since + # os.path is a sys.modules hack, it does. 
+ assert n.name in ('macpath', 'ntpath', 'posixpath', 'os2emxpath') + assert n.type == 'module' + + +def test_import_alias(get_names): + nms = get_names('import json as foo', references=True) + assert nms[0].name == 'json' + assert nms[0].type == 'module' + assert nms[0]._name.tree_name.parent.type == 'dotted_as_name' + n = nms[0].goto()[0] + assert n.name == 'json' + assert n.type == 'module' + assert n._name._value.tree_node.type == 'file_input' + + assert nms[1].name == 'foo' + assert nms[1].type == 'module' + assert nms[1]._name.tree_name.parent.type == 'dotted_as_name' + ass = nms[1].goto() + assert len(ass) == 1 + assert ass[0].name == 'json' + assert ass[0].type == 'module' + assert ass[0]._name._value.tree_node.type == 'file_input' + + +def test_added_equals_to_params(Script): + def run(rest_source): + source = dedent(""" + def foo(bar, baz): + pass + """) + results = Script(source + rest_source).complete() + assert len(results) == 1 + return results[0] + + assert run('foo(bar').name_with_symbols == 'bar=' + assert run('foo(bar').complete == '=' + assert run('foo(bar').get_completion_prefix_length() == 3 + assert run('foo(bar, baz').complete == '=' + assert run('foo(bar, baz').get_completion_prefix_length() == 3 + assert run(' bar').name_with_symbols == 'bar' + assert run(' bar').complete == '' + assert run(' bar').get_completion_prefix_length() == 3 + x = run('foo(bar=isins').name_with_symbols + assert run('foo(bar=isins').get_completion_prefix_length() == 5 + assert x == 'isinstance' + + +def test_builtin_module_with_path(Script): + """ + This test simply tests if a module from /usr/lib/python3.8/lib-dynload/ has + a path or not. It shouldn't have a module_path, because that is just + confusing. 
+ """ + semlock, = Script('from _multiprocessing import SemLock').infer() + assert isinstance(semlock._name, CompiledValueName) + assert semlock.module_path is None + assert semlock.in_builtin_module() is True + assert semlock.name == 'SemLock' + assert semlock.line is None + assert semlock.column is None + + +@pytest.mark.parametrize( + 'code, description', [ + ('int', 'instance int'), + ('str.index', 'instance int'), + ('1', None), + ] +) +def test_execute(Script, code, description): + definition, = Script(code).goto() + definitions = definition.execute() + if description is None: + assert not definitions + else: + d, = definitions + assert d.description == description + + +@pytest.mark.parametrize('goto', [False, True, None]) +@pytest.mark.parametrize( + 'code, name, file_name', [ + ('from pkg import Foo; Foo.foo', 'foo', '__init__.py'), + ('from pkg import Foo; Foo().foo', 'foo', '__init__.py'), + ('from pkg import Foo; Foo.bar', 'bar', 'module.py'), + ('from pkg import Foo; Foo().bar', 'bar', 'module.py'), + ]) +def test_inheritance_module_path(Script, goto, code, name, file_name): + base_path = get_example_dir('inheritance', 'pkg') + whatever_path = base_path.joinpath('NOT_EXISTING.py') + + script = Script(code, path=whatever_path) + if goto is None: + func, = script.infer() + else: + func, = script.goto(follow_imports=goto) + assert func.type == 'function' + assert func.name == name + assert func.module_path == base_path.joinpath(file_name) + + +def test_definition_goto_follow_imports(Script): + dumps = Script('from json import dumps\ndumps').get_names(references=True)[-1] + assert dumps.description == 'dumps' + no_follow, = dumps.goto() + assert no_follow.description == 'def dumps' + assert no_follow.line == 1 + assert no_follow.column == 17 + assert no_follow.module_name == '__main__' + follow, = dumps.goto(follow_imports=True) + assert follow.description == 'def dumps' + assert follow.line != 1 + assert follow.module_name == 'json' + + 
+@pytest.mark.parametrize( + 'code, expected', [ + ('1', 'int'), + ('x = None; x', 'None'), + ('n: Optional[str]; n', 'Optional[str]'), + ('n = None if xxxxx else ""; n', 'Optional[str]'), + ('n = None if xxxxx else str(); n', 'Optional[str]'), + ('n = None if xxxxx else str; n', 'Optional[Type[str]]'), + ('class Foo: pass\nFoo', 'Type[Foo]'), + ('class Foo: pass\nFoo()', 'Foo'), + + ('n: Type[List[int]]; n', 'Type[List[int]]'), + ('n: Type[List]; n', 'Type[list]'), + ('n: List; n', 'list'), + ('n: List[int]; n', 'List[int]'), + ('n: Iterable[int]; n', 'Iterable[int]'), + + ('n = [1]; n', 'List[int]'), + ('n = [1, ""]; n', 'List[Union[int, str]]'), + ('n = [1, str(), None]; n', 'List[Optional[Union[int, str]]]'), + ('n = {1, str()}; n', 'Set[Union[int, str]]'), + ('n = (1,); n', 'Tuple[int]'), + ('n = {1: ""}; n', 'Dict[int, str]'), + ('n = {1: "", 1.0: b""}; n', 'Dict[Union[float, int], Union[bytes, str]]'), + + ('n = next; n', 'Union[next(__i: Iterator[_T]) -> _T, ' + 'next(__i: Iterator[_T], default: _VT) -> Union[_T, _VT]]'), + ('abs', 'abs(__x: SupportsAbs[_T]) -> _T'), + ('def foo(x, y): return x if xxxx else y\nfoo(str(), 1)\nfoo', + 'foo(x: str, y: int) -> Union[int, str]'), + ('def foo(x, y = None): return x if xxxx else y\nfoo(str(), 1)\nfoo', + 'foo(x: str, y: int=None) -> Union[int, str]'), + ] +) +def test_get_type_hint(Script, code, expected): + code = 'from typing import *\n' + code + d, = Script(code).goto() + assert d.get_type_hint() == expected + + +def test_pseudotreenameclass_type(Script): + assert Script('from typing import Any\n').get_names()[0].type == 'class' + + +cls_code = '''\ +class AClass: + """my class""" + @staticmethod + def hello(): + func_var = 1 + return func_var +''' + + +@pytest.mark.parametrize( + 'code, pos, start, end', [ + ('def a_func():\n return "bar"\n', (1, 4), (1, 0), (2, 16)), + ('var1 = 12', (1, 0), (1, 0), (1, 9)), + ('var1 + 1', (1, 0), (1, 0), (1, 4)), + ('class AClass: pass', (1, 6), (1, 0), (1, 18)), + ('class 
AClass: pass\n', (1, 6), (1, 0), (1, 18)), + (cls_code, (1, 6), (1, 0), (6, 23)), + (cls_code, (4, 8), (4, 4), (6, 23)), + (cls_code, (5, 8), (5, 8), (5, 20)), + ] +) +def test_definition_start_end_position(Script, code, pos, start, end): + '''Tests for definition_start_position and definition_end_position''' + name = next( + n for n in Script(code=code).get_names(all_scopes=True, references=True) + if n._name.tree_name.start_pos <= pos <= n._name.tree_name.end_pos + ) + assert name.get_definition_start_position() == start + assert name.get_definition_end_position() == end diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_api/test_completion.py b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_completion.py new file mode 100644 index 000000000..de46223e9 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_completion.py @@ -0,0 +1,463 @@ +from os.path import join, sep as s, dirname, expanduser +import os +from textwrap import dedent +from itertools import count +from pathlib import Path + +import pytest + +from ..helpers import root_dir +from jedi.api.helpers import _start_match, _fuzzy_match +from jedi.inference.imports import _load_python_module +from jedi.file_io import KnownContentFileIO +from jedi.inference.base_value import ValueSet + + +def test_in_whitespace(Script): + code = dedent(''' + def x(): + pass''') + assert len(Script(code).complete(column=2)) > 20 + + +def test_empty_init(Script): + """This was actually an issue.""" + code = dedent('''\ + class X(object): pass + X(''') + assert not Script(code).complete() + + +def test_in_empty_space(Script): + code = dedent('''\ + class X(object): + def __init__(self): + hello + ''') + comps = Script(code).complete(3, 7) + self, = [c for c in comps if c.name == 'self'] + assert self.name == 'self' + def_, = self.infer() + assert def_.name == 'X' + + +def test_indent_value(Script): + """ + If an INDENT is the next supposed token, we should still be able to + complete. 
+ """ + code = 'if 1:\nisinstanc' + comp, = Script(code).complete() + assert comp.name == 'isinstance' + + +def test_keyword_value(Script): + def get_names(*args, **kwargs): + return [d.name for d in Script(*args, **kwargs).complete()] + + names = get_names('if 1:\n pass\n') + assert 'if' in names + assert 'elif' in names + + +def test_os_nowait(Script): + """ github issue #45 """ + s = Script("import os; os.P_").complete() + assert 'P_NOWAIT' in [i.name for i in s] + + +def test_points_in_completion(Script): + """At some point, points were inserted into the completions, this + caused problems, sometimes. + """ + c = Script("if IndentationErr").complete() + assert c[0].name == 'IndentationError' + assert c[0].complete == 'or' + + +def test_loading_unicode_files_with_bad_global_charset(Script, monkeypatch, tmpdir): + dirname = str(tmpdir.mkdir('jedi-test')) + filename1 = join(dirname, 'test1.py') + filename2 = join(dirname, 'test2.py') + data = "# coding: latin-1\nfoo = 'm\xf6p'\n".encode("latin-1") + + with open(filename1, "wb") as f: + f.write(data) + s = Script("from test1 import foo\nfoo.", path=filename2) + s.complete(line=2, column=4) + + +def test_complete_expanduser(Script): + possibilities = os.scandir(expanduser('~')) + non_dots = [p for p in possibilities if not p.name.startswith('.') and len(p.name) > 1] + item = non_dots[0] + line = "'~%s%s'" % (os.sep, item.name) + s = Script(line) + expected_name = item.name + if item.is_dir(): + expected_name += os.path.sep + assert expected_name in [c.name for c in s.complete(column=len(line)-1)] + + +def test_fake_subnodes(Script): + """ + Test the number of subnodes of a fake object. + + There was a bug where the number of child nodes would grow on every + call to :func:``jedi.inference.compiled.fake.get_faked``. + + See Github PR#649 and isseu #591. 
+ """ + def get_str_completion(values): + for c in values: + if c.name == 'str': + return c + limit = None + for i in range(2): + completions = Script('').complete() + c = get_str_completion(completions) + str_value, = c._name.infer() + n = len(str_value.tree_node.children[-1].children) + if i == 0: + limit = n + else: + assert n == limit + + +def test_generator(Script): + # Did have some problems with the usage of generator completions this + # way. + s = "def abc():\n" \ + " yield 1\n" \ + "abc()." + assert Script(s).complete() + + +def test_in_comment(Script): + assert Script(" # Comment").complete() + # TODO this is a bit ugly, that the behaviors in comments are different. + assert not Script("max_attr_value = int(2) # Cast to int for spe").complete() + + +def test_in_comment_before_string(Script): + assert not Script(" # Foo\n'asdf'").complete(line=1) + + +def test_async(Script, environment): + code = dedent(''' + foo = 3 + async def x(): + hey = 3 + ho''') + comps = Script(code).complete(column=4) + names = [c.name for c in comps] + assert 'foo' in names + assert 'hey' in names + + +def test_with_stmt_error_recovery(Script): + assert Script('with open('') as foo: foo.\na').complete(line=1) + + +def test_function_param_usage(Script): + c, = Script('def func(foo_value):\n str(foo_valu').complete() + assert c.complete == 'e' + assert c.name == 'foo_value' + + c1, c2 = Script('def func(foo_value):\n func(foo_valu').complete() + assert c1.complete == 'e' + assert c1.name == 'foo_value' + assert c2.complete == 'e=' + assert c2.name == 'foo_value=' + + +@pytest.mark.parametrize( + 'code, has_keywords', ( + ('', True), + ('x;', True), + ('1', False), + ('1 ', True), + ('1\t', True), + ('1\n', True), + ('1\\\n', True), + ) +) +def test_keyword_completion(Script, code, has_keywords): + assert has_keywords == any(x.is_keyword for x in Script(code).complete()) + + +f1 = join(root_dir, 'example.py') +f2 = join(root_dir, 'test', 'example.py') +os_path = 'from os.path 
import *\n' +# os.path.sep escaped +se = s * 2 if s == '\\' else s +current_dirname = os.path.basename(dirname(dirname(dirname(__file__)))) + + +@pytest.mark.parametrize( + 'file, code, column, expected', [ + # General tests / relative paths + (None, '"comp', None, []), # No files like comp + (None, '"test', None, [s]), + (None, '"test', 4, ['t' + s]), + ('example.py', '"test%scomp' % s, None, ['letion' + s]), + ('example.py', 'r"comp"', None, []), + ('example.py', 'r"tes"', None, []), + ('example.py', '1 + r"tes"', None, []), + ('example.py', 'r"tes"', 5, ['t' + s]), + ('example.py', 'r" tes"', 6, []), + ('test%sexample.py' % se, 'r"tes"', 5, ['t' + s]), + ('test%sexample.py' % se, 'r"test%scomp"' % s, 5, ['t' + s]), + ('test%sexample.py' % se, 'r"test%scomp"' % s, 11, ['letion' + s]), + ('test%sexample.py' % se, '"%s"' % join('test', 'completion', 'basi'), 21, ['c.py']), + ('example.py', 'rb"' + join('..', current_dirname, 'tes'), None, ['t' + s]), + + # Absolute paths + (None, f'"{root_dir.joinpath("test", "test_ca")}', None, ['che.py"']), + (None, f'"{root_dir.joinpath("test", "test_ca")}"', len(str(root_dir)) + 14, ['che.py']), + + # Longer quotes + ('example.py', 'r"""test', None, [s]), + ('example.py', 'r"""\ntest', None, []), + ('example.py', 'u"""tes\n', (1, 7), ['t' + s]), + ('example.py', '"""test%stest_cache.p"""' % s, 20, ['y']), + ('example.py', '"""test%stest_cache.p"""' % s, 19, ['py"""']), + + # Adding + ('example.py', '"test" + "%stest_cac' % se, None, ['he.py"']), + ('example.py', '"test" + "%s" + "test_cac' % se, None, ['he.py"']), + ('example.py', 'x = 1 + "test', None, []), + ('example.py', 'x = f("te" + "st)', 16, [s]), + ('example.py', 'x = f("te" + "st', 16, [s]), + ('example.py', 'x = f("te" + "st"', 16, [s]), + ('example.py', 'x = f("te" + "st")', 16, [s]), + ('example.py', 'x = f("t" + "est")', 16, [s]), + ('example.py', 'x = f(b"t" + "est")', 17, []), + ('example.py', '"test" + "', None, [s]), + + # __file__ + (f1, os_path + 
'dirname(__file__) + "%stest' % s, None, [s]), + (f2, os_path + 'dirname(__file__) + "%stest_ca' % se, None, ['che.py"']), + (f2, os_path + 'dirname(abspath(__file__)) + sep + "test_ca', None, ['che.py"']), + (f2, os_path + 'join(dirname(__file__), "completion") + sep + "basi', None, ['c.py"']), + (f2, os_path + 'join("test", "completion") + sep + "basi', None, ['c.py"']), + + # inside join + (f2, os_path + 'join(dirname(__file__), "completion", "basi', None, ['c.py"']), + (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 43, ['c.py"']), + (f2, os_path + 'join(dirname(__file__), "completion", "basi")', 43, ['c.py']), + (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 35, ['']), + (f2, os_path + 'join(dirname(__file__), "completion", "basi)', 33, ['on"']), + (f2, os_path + 'join(dirname(__file__), "completion", "basi")', 33, ['on"']), + + # join with one argument. join will not get inferred and the result is + # that directories and in a slash. This is unfortunate, but doesn't + # really matter. 
+ (f2, os_path + 'join("tes', 9, ['t"']), + (f2, os_path + 'join(\'tes)', 9, ["t'"]), + (f2, os_path + 'join(r"tes"', 10, ['t']), + (f2, os_path + 'join("""tes""")', 11, ['t']), + + # Almost like join but not really + (f2, os_path + 'join["tes', 9, ['t' + s]), + (f2, os_path + 'join["tes"', 9, ['t' + s]), + (f2, os_path + 'join["tes"]', 9, ['t' + s]), + (f2, os_path + 'join[dirname(__file__), "completi', 33, []), + (f2, os_path + 'join[dirname(__file__), "completi"', 33, []), + (f2, os_path + 'join[dirname(__file__), "completi"]', 33, []), + + # With full paths + (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi', 49, ['on"']), + (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi"', 49, ['on']), + (f2, 'import os\nos.path.join(os.path.dirname(__file__), "completi")', 49, ['on']), + + # With alias + (f2, 'import os.path as p as p\np.join(p.dirname(__file__), "completi', None, ['on"']), + (f2, 'from os.path import dirname, join as j\nj(dirname(__file__), "completi', + None, ['on"']), + + # Trying to break it + (f2, os_path + 'join(["tes', 10, ['t' + s]), + (f2, os_path + 'join(["tes"]', 10, ['t' + s]), + (f2, os_path + 'join(["tes"])', 10, ['t' + s]), + (f2, os_path + 'join("test", "test_cac" + x,', 22, ['he.py']), + + # GH #1528 + (f2, "'a' 'b'", 4, Ellipsis), + ] +) +def test_file_path_completions(Script, file, code, column, expected): + line = None + if isinstance(column, tuple): + line, column = column + comps = Script(code, path=file).complete(line=line, column=column) + if expected is Ellipsis: + assert len(comps) > 100 # This is basically global completions. 
+ else: + assert [c.complete for c in comps] == expected + + +def test_file_path_should_have_completions(Script): + assert Script('r"').complete() # See GH #1503 + + +_dict_keys_completion_tests = [ + ('ints[', 5, ['1', '50', Ellipsis]), + ('ints[]', 5, ['1', '50', Ellipsis]), + ('ints[1]', 5, ['1', '50', Ellipsis]), + ('ints[1]', 6, ['']), + ('ints[1', 5, ['1', '50', Ellipsis]), + ('ints[1', 6, ['']), + + ('ints[5]', 5, ['1', '50', Ellipsis]), + ('ints[5]', 6, ['0']), + ('ints[50', 5, ['1', '50', Ellipsis]), + ('ints[5', 6, ['0']), + ('ints[ 5', None, ['0']), + ('ints [ 5', None, ['0']), + ('ints[50', 6, ['0']), + ('ints[50', 7, ['']), + + ('strs[', 5, ["'asdf'", "'fbar'", "'foo'", Ellipsis]), + ('strs[]', 5, ["'asdf'", "'fbar'", "'foo'", Ellipsis]), + ("strs['", 6, ["asdf'", "fbar'", "foo'"]), + ("strs[']", 6, ["asdf'", "fbar'", "foo'"]), + ('strs["]', 6, ['asdf"', 'fbar"', 'foo"']), + ('strs["""]', 6, ['asdf', 'fbar', 'foo']), + ('strs["""]', 8, ['asdf"""', 'fbar"""', 'foo"""']), + ('strs[b"]', 8, []), + ('strs[r"asd', 10, ['f"']), + ('strs[r"asd"', 10, ['f']), + ('strs[R"asd', 10, ['f"']), + ('strs[ R"asd', None, ['f"']), + ('strs[\tR"asd', None, ['f"']), + ('strs[\nR"asd', None, ['f"']), + ('strs[f"asd', 10, []), + ('strs[br"""asd', 13, ['f"""']), + ('strs[br"""asd"""', 13, ['f']), + ('strs[ \t"""asd"""', 13, ['f']), + + ('strs["f', 7, ['bar"', 'oo"']), + ('strs["f"', 7, ['bar', 'oo']), + ('strs["f]', 7, ['bar"', 'oo"']), + ('strs["f"]', 7, ['bar', 'oo']), + + ('mixed[', 6, [r"'a\\sdf'", '1', '1.1', "b'foo'", Ellipsis]), + ('mixed[1', 7, ['', '.1']), + ('mixed[Non', 9, ['e']), + + ('casted["f', 9, ['3"', 'bar"', 'oo"']), + ('casted["f"', 9, ['3', 'bar', 'oo']), + ('casted["f3', 10, ['"']), + ('casted["f3"', 10, ['']), + ('casted_mod["f', 13, ['3"', 'bar"', 'oo"', 'ull"', 'uuu"']), + + ('keywords["', None, ['a"']), + ('keywords[Non', None, ['e']), + ('keywords[Fa', None, ['lse']), + ('keywords[Tr', None, ['ue']), + ('keywords[str', None, ['', 's']), +] + + 
+@pytest.mark.parametrize( + 'added_code, column, expected', _dict_keys_completion_tests +) +def test_dict_keys_completions(Script, added_code, column, expected): + code = dedent(r''' + ints = {1: ''} + ints[50] = 3.0 + strs = {'asdf': 1, u"""foo""": 2, r'fbar': 3} + mixed = {1: 2, 1.10: 4, None: 6, r'a\sdf': 8, b'foo': 9} + casted = dict(strs, f3=4, r'\\xyz') + casted_mod = dict(casted) + casted_mod["fuuu"] = 8 + casted_mod["full"] = 8 + keywords = {None: 1, False: 2, "a": 3} + ''') + comps = Script(code + added_code).complete(column=column) + if Ellipsis in expected: + # This means that global completions are part of this, so filter all of + # that out. + comps = [c for c in comps if not c._name.is_value_name and not c.is_keyword] + expected = [e for e in expected if e is not Ellipsis] + + assert [c.complete for c in comps] == expected + + +def test_dict_keys_in_weird_case(Script): + assert Script('a[\n# foo\nx]').complete(line=2, column=0) + + +def test_start_match(): + assert _start_match('Condition', 'C') + + +def test_fuzzy_match(): + assert _fuzzy_match('Condition', 'i') + assert not _fuzzy_match('Condition', 'p') + assert _fuzzy_match('Condition', 'ii') + assert not _fuzzy_match('Condition', 'Ciito') + assert _fuzzy_match('Condition', 'Cdiio') + + +def test_ellipsis_completion(Script): + assert Script('...').complete() == [] + + +@pytest.fixture +def module_injector(): + counter = count() + + def module_injector(inference_state, names, code): + assert isinstance(names, tuple) + file_io = KnownContentFileIO( + Path('foo/bar/module-injector-%s.py' % next(counter)).absolute(), + code + ) + v = _load_python_module(inference_state, file_io, names) + inference_state.module_cache.add(names, ValueSet([v])) + + return module_injector + + +def test_completion_cache(Script, module_injector): + """ + For some modules like numpy, tensorflow or pandas we cache docstrings and + type to avoid them slowing us down, because they are huge. 
+ """ + script = Script('import numpy; numpy.foo') + module_injector(script._inference_state, ('numpy',), 'def foo(a): "doc"') + c, = script.complete() + assert c.name == 'foo' + assert c.type == 'function' + assert c.docstring() == 'foo(a)\n\ndoc' + + code = dedent('''\ + class foo: + 'doc2' + def __init__(self): + pass + ''') + script = Script('import numpy; numpy.foo') + module_injector(script._inference_state, ('numpy',), code) + # The outpus should still be the same + c, = script.complete() + assert c.name == 'foo' + assert c.type == 'function' + assert c.docstring() == 'foo(a)\n\ndoc' + cls, = c.infer() + assert cls.type == 'class' + assert cls.docstring() == 'foo()\n\ndoc2' + + +@pytest.mark.parametrize('module', ['typing', 'os']) +def test_module_completions(Script, module): + for c in Script('import {module}; {module}.'.format(module=module)).complete(): + # Just make sure that there are no errors + c.type + c.docstring() + + +def test_whitespace_at_end_after_dot(Script): + assert 'strip' in [c.name for c in Script('str. ').complete()] diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_api/test_context.py b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_context.py new file mode 100644 index 000000000..6fabb2731 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_context.py @@ -0,0 +1,115 @@ +import pytest + + +def _iter_hierarchy(context): + def iter(context): + while context is not None: + yield context + context = context.parent() + + return reversed(list(iter(context))) + + +func_code = '''\ +def func1(x, y): + pass + +def func2(): + what ? 
+i = 3 + +def func3(): + 1''' +cls_code = '''\ +class Foo: + def x(): + def y(): + pass +''' +cls_nested = '''\ +class C: + class D: + def f(): + pass +''' +lambda_ = '''\ +def x(): + (lambda x: + lambda: y + ) +''' +comprehension = ''' +def f(x): + [x + for + x + in x + ]''' + +with_brackets = '''\ +def x(): + [ + + ] +''' + + +@pytest.mark.parametrize( + 'code, line, column, full_name, expected_parents', [ + ('', None, None, 'myfile', []), + (' ', None, 0, 'myfile', []), + + (func_code, 1, 0, 'myfile', []), + (func_code, 1, None, 'myfile.func1', ['func1']), + (func_code, 1, 1, 'myfile.func1', ['func1']), + (func_code, 1, 4, 'myfile.func1', ['func1']), + (func_code, 1, 10, 'myfile.func1', ['func1']), + + (func_code, 3, 0, 'myfile', []), + (func_code, 5, None, 'myfile.func2', ['func2']), + (func_code, 6, None, 'myfile', []), + (func_code, 7, None, 'myfile', []), + (func_code, 9, None, 'myfile.func3', ['func3']), + + (cls_code, None, None, 'myfile', []), + (cls_code + ' ', None, None, 'myfile.Foo', ['Foo']), + (cls_code + ' ' * 3, None, None, 'myfile.Foo', ['Foo']), + (cls_code + ' ' * 4, None, None, 'myfile.Foo', ['Foo']), + (cls_code + ' ' * 5, None, None, 'myfile.Foo.x', ['Foo', 'x']), + (cls_code + ' ' * 8, None, None, 'myfile.Foo.x', ['Foo', 'x']), + (cls_code + ' ' * 12, None, None, None, ['Foo', 'x', 'y']), + + (cls_code, 4, 0, 'myfile', []), + (cls_code, 4, 3, 'myfile.Foo', ['Foo']), + (cls_code, 4, 4, 'myfile.Foo', ['Foo']), + (cls_code, 4, 5, 'myfile.Foo.x', ['Foo', 'x']), + (cls_code, 4, 8, 'myfile.Foo.x', ['Foo', 'x']), + (cls_code, 4, 12, None, ['Foo', 'x', 'y']), + (cls_code, 1, 1, 'myfile.Foo', ['Foo']), + + (cls_nested, 4, None, 'myfile.C.D.f', ['C', 'D', 'f']), + (cls_nested, 4, 3, 'myfile.C', ['C']), + + (lambda_, 2, 9, 'myfile.x', ['x']), # the lambda keyword + (lambda_, 2, 13, 'myfile.x', ['x']), # the lambda param + (lambda_, 3, 0, 'myfile', []), # Within brackets, but they are ignored. 
+ (lambda_, 3, 8, 'myfile.x', ['x']), + (lambda_, 3, None, 'myfile.x', ['x']), + + (comprehension, 2, None, 'myfile.f', ['f']), + (comprehension, 3, None, 'myfile.f', ['f']), + (comprehension, 4, None, 'myfile.f', ['f']), + (comprehension, 5, None, 'myfile.f', ['f']), + (comprehension, 6, None, 'myfile.f', ['f']), + + # Brackets are just ignored. + (with_brackets, 3, None, 'myfile', []), + (with_brackets, 4, 4, 'myfile.x', ['x']), + (with_brackets, 4, 5, 'myfile.x', ['x']), + ] +) +def test_context(Script, code, line, column, full_name, expected_parents): + context = Script(code, path='/foo/myfile.py').get_context(line, column) + assert context.full_name == full_name + parent_names = [d.name for d in _iter_hierarchy(context)] + assert parent_names == ['myfile'] + expected_parents diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_api/test_documentation.py b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_documentation.py new file mode 100644 index 000000000..4c09d6121 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_documentation.py @@ -0,0 +1,143 @@ +from textwrap import dedent + +import pytest + + +def test_error_leaf_keyword_doc(Script): + d, = Script("or").help(1, 1) + assert len(d.docstring()) > 100 + assert d.name == 'or' + + +def test_error_leaf_operator_doc(Script): + d, = Script("==").help() + assert len(d.docstring()) > 100 + assert d.name == '==' + + +def test_keyword_completion(Script): + k = Script("fro").complete()[0] + imp_start = 'The "import' + assert k.docstring(raw=True).startswith(imp_start) + assert k.docstring().startswith(imp_start) + + +def test_import_keyword(Script): + d, = Script("import x").help(column=0) + assert d.docstring().startswith('The "import" statement') + # unrelated to #44 + + +def test_import_keyword_with_gotos(goto_or_infer): + assert not goto_or_infer("import x", column=0) + + +def test_operator_doc(Script): + d, = Script("a == b").help(1, 3) + assert len(d.docstring()) > 100 + + 
+@pytest.mark.parametrize( + 'code, help_part', [ + ('str', 'Create a new string object'), + ('str.strip', 'Return a copy of the string'), + ] +) +def test_stdlib_doc(Script, code, help_part): + h, = Script(code).help() + assert help_part in h.docstring(raw=True) + + +def test_lambda(Script): + d, = Script('lambda x: x').help(column=0) + assert d.type == 'keyword' + assert d.docstring().startswith('Lambdas\n*******') + + +@pytest.mark.parametrize( + 'code, kwargs', [ + ('?', {}), + ('""', {}), + ('"', {}), + ] +) +def test_help_no_returns(Script, code, kwargs): + assert not Script(code).help(**kwargs) + + +@pytest.mark.parametrize( + 'to_execute, expected_doc', [ + ('X.x', 'Yeah '), + ('X().x', 'Yeah '), + ('X.y', 'f g '), + ('X.z', ''), + ] +) +def test_attribute_docstrings(goto_or_help, expected_doc, to_execute): + code = dedent('''\ + class X: + "ha" + x = 3 + """ Yeah """ + y = 5 + "f g " + z = lambda x: 1 + ''') + + d, = goto_or_help(code + to_execute) + assert d.docstring() == expected_doc + + +def test_version_info(Script): + """ + Version info is a bit special, because it needs to be fast for some ifs, so + it's a special object that we have to check. + """ + s = Script(dedent("""\ + import sys + + sys.version_info""")) + + c, = s.complete() + assert c.docstring() == 'sys.version_info\n\nVersion information as a named tuple.' 
+ + +def test_builtin_docstring(goto_or_help_or_infer): + d, = goto_or_help_or_infer('open') + + doc = d.docstring() + assert doc.startswith('open(file: ') + assert 'Open file' in doc + + +def test_docstring_decorator(goto_or_help_or_infer): + code = dedent(''' + import types + + def dec(func): + return types.FunctionType() + + @dec + def func(a, b): + "hello" + return + func''') + d, = goto_or_help_or_infer(code) + + doc = d.docstring() + assert doc == 'FunctionType(*args: Any, **kwargs: Any) -> Any\n\nhello' + + +@pytest.mark.parametrize('code', ['', '\n', ' ']) +def test_empty(Script, code): + assert not Script(code).help(1, 0) + + +@pytest.mark.parametrize('code', ['f()', '(bar or baz)', 'f[3]']) +def test_no_help_for_operator(Script, code): + assert not Script(code).help() + + +@pytest.mark.parametrize('code', ['()', '(1,)', '[]', '[1]', 'f[]']) +def test_help_for_operator(Script, code): + assert Script(code).help() diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_api/test_environment.py b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_environment.py new file mode 100644 index 000000000..621fcb8ee --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_environment.py @@ -0,0 +1,154 @@ +import os +import sys + +import pytest + +import jedi +from jedi.api.environment import get_default_environment, find_virtualenvs, \ + InvalidPythonEnvironment, find_system_environments, \ + get_system_environment, create_environment, InterpreterEnvironment, \ + get_cached_default_environment + + +def test_sys_path(): + assert get_default_environment().get_sys_path() + + +def test_find_system_environments(): + envs = list(find_system_environments()) + assert len(envs) + for env in envs: + assert env.version_info + assert env.get_sys_path() + parser_version = env.get_grammar().version_info + assert parser_version[:2] == env.version_info[:2] + + +@pytest.mark.parametrize( + 'version', + ['3.6', '3.7', '3.8', '3.9'] +) +def test_versions(version): + try: + env = 
get_system_environment(version) + except InvalidPythonEnvironment: + if int(version.replace('.', '')) == str(sys.version_info[0]) + str(sys.version_info[1]): + # At least the current version has to work + raise + pytest.skip() + + assert version == str(env.version_info[0]) + '.' + str(env.version_info[1]) + assert env.get_sys_path() + + +def test_load_module(inference_state): + access_path = inference_state.compiled_subprocess.load_module( + dotted_name='math', + sys_path=inference_state.get_sys_path() + ) + name, access_handle = access_path.accesses[0] + + assert access_handle.py__bool__() is True + assert access_handle.get_api_type() == 'module' + with pytest.raises(AttributeError): + access_handle.py__mro__() + + +def test_error_in_environment(inference_state, Script, environment): + if isinstance(environment, InterpreterEnvironment): + pytest.skip("We don't catch these errors at the moment.") + + # Provoke an error to show how Jedi can recover from it. + with pytest.raises(jedi.InternalError): + inference_state.compiled_subprocess._test_raise_error(KeyboardInterrupt) + # The second time it should raise an InternalError again. + with pytest.raises(jedi.InternalError): + inference_state.compiled_subprocess._test_raise_error(KeyboardInterrupt) + # Jedi should still work. + def_, = Script('str').infer() + assert def_.name == 'str' + + +def test_stdout_in_subprocess(inference_state, Script): + inference_state.compiled_subprocess._test_print(stdout='.') + Script('1').infer() + + +def test_killed_subprocess(inference_state, Script, environment): + if isinstance(environment, InterpreterEnvironment): + pytest.skip("We cannot kill our own process") + # Just kill the subprocess. + inference_state.compiled_subprocess._compiled_subprocess._get_process().kill() + # Since the process was terminated (and nobody knows about it) the first + # Jedi call fails. 
+ with pytest.raises(jedi.InternalError): + Script('str').infer() + + def_, = Script('str').infer() + # Jedi should now work again. + assert def_.name == 'str' + + +def test_not_existing_virtualenv(monkeypatch): + """Should not match the path that was given""" + path = '/foo/bar/jedi_baz' + monkeypatch.setenv('VIRTUAL_ENV', path) + assert get_default_environment().executable != path + + +def test_working_venv(venv_path, monkeypatch): + monkeypatch.setenv('VIRTUAL_ENV', venv_path) + assert get_default_environment().path == venv_path + + +def test_scanning_venvs(venv_path): + parent_dir = os.path.dirname(venv_path) + assert any(venv.path == venv_path + for venv in find_virtualenvs([parent_dir])) + + +def test_create_environment_venv_path(venv_path): + environment = create_environment(venv_path) + assert environment.path == venv_path + + +def test_create_environment_executable(): + environment = create_environment(sys.executable) + assert environment.executable == sys.executable + + +def test_get_default_environment_from_env_does_not_use_safe(tmpdir, monkeypatch): + fake_python = os.path.join(str(tmpdir), 'fake_python') + with open(fake_python, 'w', newline='') as f: + f.write('') + + def _get_subprocess(self): + if self._start_executable != fake_python: + raise RuntimeError('Should not get called!') + self.executable = fake_python + self.path = 'fake' + + monkeypatch.setattr('jedi.api.environment.Environment._get_subprocess', + _get_subprocess) + + monkeypatch.setenv('VIRTUAL_ENV', fake_python) + env = get_default_environment() + assert env.path == 'fake' + + +@pytest.mark.parametrize('virtualenv', ['', 'fufuuuuu', sys.prefix]) +def test_get_default_environment_when_embedded(monkeypatch, virtualenv): + # When using Python embedded, sometimes the executable is not a Python + # executable. 
+ executable_name = 'RANDOM_EXE' + monkeypatch.setattr(sys, 'executable', executable_name) + monkeypatch.setenv('VIRTUAL_ENV', virtualenv) + env = get_default_environment() + assert env.executable != executable_name + + +def test_changing_venv(venv_path, monkeypatch): + monkeypatch.setitem(os.environ, 'VIRTUAL_ENV', venv_path) + get_cached_default_environment() + monkeypatch.setitem(os.environ, 'VIRTUAL_ENV', sys.executable) + assert get_cached_default_environment().executable == sys.executable diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_api/test_full_name.py b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_full_name.py new file mode 100644 index 000000000..446901416 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_full_name.py @@ -0,0 +1,127 @@ +""" +Tests for :attr:`.BaseName.full_name`. + +There are three kinds of test: + +#. Test classes derived from :class:`MixinTestFullName`. + Child class defines :attr:`.operation` to alter how + the api definition instance is created. + +#. :class:`TestFullDefinedName` is to test combination of + ``obj.full_name`` and ``jedi.defined_names``. + +#. Misc single-function tests. 
+""" + +import textwrap +from unittest import TestCase + +import pytest + +import jedi + + +class MixinTestFullName(object): + operation = None + + @pytest.fixture(autouse=True) + def init(self, Script, environment): + self.Script = Script + self.environment = environment + + def check(self, source, desired): + script = self.Script(textwrap.dedent(source)) + definitions = getattr(script, self.operation)() + for d in definitions: + self.assertEqual(d.full_name, desired) + + def test_os_path_join(self): + self.check('import os; os.path.join', 'os.path.join') + + def test_builtin(self): + self.check('TypeError', 'builtins.TypeError') + + +class TestFullNameWithGotoDefinitions(MixinTestFullName, TestCase): + operation = 'infer' + + def test_tuple_mapping(self): + self.check(""" + import re + any_re = re.compile('.*') + any_re""", 'typing.Pattern') + + def test_from_import(self): + self.check('from os import path', 'os.path') + + +class TestFullNameWithCompletions(MixinTestFullName, TestCase): + operation = 'complete' + + +class TestFullDefinedName(TestCase): + """ + Test combination of ``obj.full_name`` and ``jedi.Script.get_names``. + """ + @pytest.fixture(autouse=True) + def init(self, environment): + self.environment = environment + + def check(self, source, desired): + script = jedi.Script(textwrap.dedent(source), environment=self.environment) + definitions = script.get_names() + full_names = [d.full_name for d in definitions] + self.assertEqual(full_names, desired) + + def test_local_names(self): + self.check(""" + def f(): pass + class C: pass + """, ['__main__.f', '__main__.C']) + + def test_imports(self): + self.check(""" + import os + from os import path + from os.path import join + from os import path as opath + """, ['os', 'os.path', 'os.path.join', 'os.path']) + + +def test_sub_module(Script, jedi_path): + """ + ``full_name needs to check sys.path to actually find it's real path module + path. 
+ """ + sys_path = [jedi_path] + project = jedi.Project('.', sys_path=sys_path) + defs = Script('from jedi.api import classes; classes', project=project).infer() + assert [d.full_name for d in defs] == ['jedi.api.classes'] + defs = Script('import jedi.api; jedi.api', project=project).infer() + assert [d.full_name for d in defs] == ['jedi.api'] + + +def test_os_path(Script): + d, = Script('from os.path import join').complete() + assert d.full_name == 'os.path.join' + d, = Script('import os.p').complete() + assert d.full_name == 'os.path' + + +def test_os_issues(Script): + """Issue #873""" + # nt is not found, because it's deleted + assert [c.name for c in Script('import os\nos.nt''').complete()] == [] + + +def test_param_name(Script): + name, = Script('class X:\n def foo(bar): bar''').goto() + assert name.type == 'param' + assert name.full_name is None + + +def test_variable_in_func(Script): + names = Script('def f(): x = 3').get_names(all_scopes=True) + x = names[-1] + assert x.name == 'x' + assert x.full_name == '__main__.f.x' diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_api/test_interpreter.py b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_interpreter.py new file mode 100644 index 000000000..131ec6c16 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_interpreter.py @@ -0,0 +1,741 @@ +""" +Tests of ``jedi.api.Interpreter``. 
+""" +import sys +import warnings +import typing + +import pytest + +import jedi +from jedi.inference.compiled import mixed +from importlib import import_module + + +class _GlobalNameSpace: + class SideEffectContainer: + pass + + +def get_completion(source, namespace): + i = jedi.Interpreter(source, [namespace]) + completions = i.complete() + assert len(completions) == 1 + return completions[0] + + +def test_builtin_details(): + import keyword + + class EmptyClass: + pass + + variable = EmptyClass() + + def func(): + pass + + cls = get_completion('EmptyClass', locals()) + var = get_completion('variable', locals()) + f = get_completion('func', locals()) + m = get_completion('keyword', locals()) + assert cls.type == 'class' + assert var.type == 'instance' + assert f.type == 'function' + assert m.type == 'module' + + +def test_numpy_like_non_zero(): + """ + Numpy-like array can't be caster to bool and need to be compacre with + `is`/`is not` and not `==`/`!=` + """ + + class NumpyNonZero: + + def __zero__(self): + raise ValueError('Numpy arrays would raise and tell you to use .any() or all()') + def __bool__(self): + raise ValueError('Numpy arrays would raise and tell you to use .any() or all()') + + class NumpyLike: + + def __eq__(self, other): + return NumpyNonZero() + + def something(self): + pass + + x = NumpyLike() + d = {'a': x} + + # just assert these do not raise. They (strangely) trigger different + # codepath + get_completion('d["a"].some', {'d': d}) + get_completion('x.some', {'x': x}) + + +def test_nested_resolve(): + class XX: + def x(): + pass + + cls = get_completion('XX', locals()) + func = get_completion('XX.x', locals()) + assert (func.line, func.column) == (cls.line + 1, 12) + + +def test_side_effect_completion(): + """ + In the repl it's possible to cause side effects that are not documented in + Python code, however we want references to Python code as well. Therefore + we need some mixed kind of magic for tests. 
+ """ + _GlobalNameSpace.SideEffectContainer.foo = 1 + side_effect = get_completion('SideEffectContainer', _GlobalNameSpace.__dict__) + + # It's a class that contains MixedObject. + value, = side_effect._name.infer() + assert isinstance(value, mixed.MixedObject) + foo = get_completion('SideEffectContainer.foo', _GlobalNameSpace.__dict__) + assert foo.name == 'foo' + + +def _assert_interpreter_complete(source, namespace, completions, + **kwds): + script = jedi.Interpreter(source, [namespace], **kwds) + cs = script.complete() + actual = [c.name for c in cs] + assert sorted(actual) == sorted(completions) + + +def test_complete_raw_function(): + from os.path import join + _assert_interpreter_complete('join("").up', locals(), ['upper']) + + +def test_complete_raw_function_different_name(): + from os.path import join as pjoin + _assert_interpreter_complete('pjoin("").up', locals(), ['upper']) + + +def test_complete_raw_module(): + import os + _assert_interpreter_complete('os.path.join("a").up', locals(), ['upper']) + + +def test_complete_raw_instance(): + import datetime + dt = datetime.datetime(2013, 1, 1) + completions = ['time', 'timetz', 'timetuple', 'timestamp'] + _assert_interpreter_complete('(dt - dt).ti', locals(), completions) + + +def test_list(): + array = ['haha', 1] + _assert_interpreter_complete('array[0].uppe', locals(), ['upper']) + _assert_interpreter_complete('array[0].real', locals(), []) + + # something different, no index given, still just return the right + _assert_interpreter_complete('array[int].real', locals(), ['real']) + _assert_interpreter_complete('array[int()].real', locals(), ['real']) + # inexistent index + _assert_interpreter_complete('array[2].upper', locals(), ['upper']) + + +def test_getattr(): + class Foo1: + bar = [] + baz = 'bar' + _assert_interpreter_complete('getattr(Foo1, baz).app', locals(), ['append']) + + +def test_slice(): + class Foo1: + bar = [] + baz = 'xbarx' + _assert_interpreter_complete('getattr(Foo1, 
baz[1:-1]).append', locals(), ['append']) + + +def test_getitem_side_effects(): + class Foo2: + def __getitem__(self, index): + # Possible side effects here, should therefore not call this. + if True: + raise NotImplementedError() + return index + + foo = Foo2() + _assert_interpreter_complete('foo["asdf"].upper', locals(), ['upper']) + + +@pytest.mark.parametrize('stacklevel', [1, 2]) +@pytest.mark.filterwarnings("error") +def test_property_warnings(stacklevel, allow_unsafe_getattr): + class Foo3: + @property + def prop(self): + # Possible side effects here, should therefore not call this. + warnings.warn("foo", DeprecationWarning, stacklevel=stacklevel) + return '' + + foo = Foo3() + expected = ['upper'] if allow_unsafe_getattr else [] + _assert_interpreter_complete('foo.prop.uppe', locals(), expected) + + +@pytest.mark.parametrize('class_is_findable', [False, True]) +def test__getattr__completions(allow_unsafe_getattr, class_is_findable): + class CompleteGetattr(object): + def __getattr__(self, name): + if name == 'foo': + return self + if name == 'fbar': + return '' + raise AttributeError(name) + + def __dir__(self): + return ['foo', 'fbar'] + object.__dir__(self) + + if not class_is_findable: + CompleteGetattr.__name__ = "something_somewhere" + namespace = {'c': CompleteGetattr()} + expected = ['foo', 'fbar'] + _assert_interpreter_complete('c.f', namespace, expected) + + # Completions don't work for class_is_findable, because __dir__ is checked + # for interpreter analysis, but if the static analysis part tries to help + # it will not work. However static analysis is pretty good and understands + # how gettatr works (even the ifs/comparisons). 
+ if not allow_unsafe_getattr: + expected = [] + _assert_interpreter_complete('c.foo.f', namespace, expected) + _assert_interpreter_complete('c.foo.foo.f', namespace, expected) + _assert_interpreter_complete('c.foo.uppe', namespace, []) + + expected_int = ['upper'] if allow_unsafe_getattr or class_is_findable else [] + _assert_interpreter_complete('c.foo.fbar.uppe', namespace, expected_int) + + +@pytest.fixture(params=[False, True]) +def allow_unsafe_getattr(request, monkeypatch): + monkeypatch.setattr(jedi.Interpreter, '_allow_descriptor_getattr_default', request.param) + return request.param + + +def test_property_error_oldstyle(allow_unsafe_getattr): + lst = [] + + class Foo3: + @property + def bar(self): + lst.append(1) + raise ValueError + + foo = Foo3() + _assert_interpreter_complete('foo.bar', locals(), ['bar']) + _assert_interpreter_complete('foo.bar.baz', locals(), []) + + if allow_unsafe_getattr: + assert lst == [1] + else: + # There should not be side effects + assert lst == [] + + +def test_property_error_newstyle(allow_unsafe_getattr): + lst = [] + + class Foo3(object): + @property + def bar(self): + lst.append(1) + raise ValueError + + foo = Foo3() + _assert_interpreter_complete('foo.bar', locals(), ['bar']) + _assert_interpreter_complete('foo.bar.baz', locals(), []) + + if allow_unsafe_getattr: + assert lst == [1] + else: + # There should not be side effects + assert lst == [] + + +def test_property_content(): + class Foo3(object): + @property + def bar(self): + return 1 + + foo = Foo3() + def_, = jedi.Interpreter('foo.bar', [locals()]).infer() + assert def_.name == 'int' + + +def test_param_completion(): + def foo(bar): + pass + + lambd = lambda xyz: 3 + + _assert_interpreter_complete('foo(bar', locals(), ['bar=']) + _assert_interpreter_complete('lambd(xyz', locals(), ['xyz=']) + + +def test_endless_yield(): + lst = [1] * 10000 + # If iterating over lists it should not be possible to take an extremely + # long time. 
+ _assert_interpreter_complete('list(lst)[9000].rea', locals(), ['real']) + + +def test_completion_params(): + foo = lambda a, b=3: None + + script = jedi.Interpreter('foo', [locals()]) + c, = script.complete() + sig, = c.get_signatures() + assert [p.name for p in sig.params] == ['a', 'b'] + assert sig.params[0].infer() == [] + t, = sig.params[1].infer() + assert t.name == 'int' + + +def test_completion_param_annotations(): + # Need to define this function not directly in Python. Otherwise Jedi is too + # clever and uses the Python code instead of the signature object. + code = 'def foo(a: 1, b: str, c: int = 1.0) -> bytes: pass' + exec(code, locals()) + script = jedi.Interpreter('foo', [locals()]) + c, = script.complete() + sig, = c.get_signatures() + a, b, c = sig.params + assert a.infer() == [] + assert [d.name for d in b.infer()] == ['str'] + assert {d.name for d in c.infer()} == {'int', 'float'} + + assert a.description == 'param a: 1' + assert b.description == 'param b: str' + assert c.description == 'param c: int=1.0' + + d, = jedi.Interpreter('foo()', [locals()]).infer() + assert d.name == 'bytes' + + +def test_keyword_argument(): + def f(some_keyword_argument): + pass + + c, = jedi.Interpreter("f(some_keyw", [{'f': f}]).complete() + assert c.name == 'some_keyword_argument=' + assert c.complete == 'ord_argument=' + + # Make it impossible for jedi to find the source of the function. 
+ f.__name__ = 'xSOMETHING' + c, = jedi.Interpreter("x(some_keyw", [{'x': f}]).complete() + assert c.name == 'some_keyword_argument=' + + +def test_more_complex_instances(): + class Something: + def foo(self, other): + return self + + class Base: + def wow(self): + return Something() + + script = jedi.Interpreter('Base().wow().foo', [locals()]) + c, = script.complete() + assert c.name == 'foo' + + x = Base() + script = jedi.Interpreter('x.wow().foo', [locals()]) + c, = script.complete() + assert c.name == 'foo' + + +def test_repr_execution_issue(): + """ + Anticipate inspect.getfile executing a __repr__ of all kinds of objects. + See also #919. + """ + class ErrorRepr: + def __repr__(self): + raise Exception('xyz') + + er = ErrorRepr() + + script = jedi.Interpreter('er', [locals()]) + d, = script.infer() + assert d.name == 'ErrorRepr' + assert d.type == 'instance' + + +def test_dir_magic_method(allow_unsafe_getattr): + class CompleteAttrs(object): + def __getattr__(self, name): + if name == 'foo': + return 1 + if name == 'bar': + return 2 + raise AttributeError(name) + + def __dir__(self): + return ['foo', 'bar'] + object.__dir__(self) + + itp = jedi.Interpreter("ca.", [{'ca': CompleteAttrs()}]) + completions = itp.complete() + names = [c.name for c in completions] + assert ('__dir__' in names) is True + assert '__class__' in names + assert 'foo' in names + assert 'bar' in names + + foo = [c for c in completions if c.name == 'foo'][0] + if allow_unsafe_getattr: + inst, = foo.infer() + assert inst.name == 'int' + assert inst.type == 'instance' + else: + assert foo.infer() == [] + + +def test_name_not_findable(): + class X(): + if 0: + NOT_FINDABLE + + def hidden(self): + return + + hidden.__name__ = 'NOT_FINDABLE' + + setattr(X, 'NOT_FINDABLE', X.hidden) + + assert jedi.Interpreter("X.NOT_FINDA", [locals()]).complete() + + +def test_stubs_working(): + from multiprocessing import cpu_count + defs = jedi.Interpreter("cpu_count()", [locals()]).infer() + assert [d.name 
for d in defs] == ['int'] + + +def test_sys_path_docstring(): # Was an issue in #1298 + import jedi + s = jedi.Interpreter("from sys import path\npath", namespaces=[locals()]) + s.complete(line=2, column=4)[0].docstring() + + +@pytest.mark.parametrize( + 'code, completions', [ + ('x[0].uppe', ['upper']), + ('x[1337].uppe', ['upper']), + ('x[""].uppe', ['upper']), + ('x.appen', ['append']), + + ('y.add', ['add']), + ('y[0].', []), + ('list(y)[0].', []), # TODO use stubs properly to improve this. + + ('z[0].uppe', ['upper']), + ('z[0].append', ['append']), + ('z[1].uppe', ['upper']), + ('z[1].append', []), + + ('collections.deque().app', ['append', 'appendleft']), + ('deq.app', ['append', 'appendleft']), + ('deq.pop', ['pop', 'popleft']), + ('deq.pop().', []), + + ('collections.Counter("asdf").setdef', ['setdefault']), + ('collections.Counter("asdf").pop().imag', ['imag']), + ('list(collections.Counter("asdf").keys())[0].uppe', ['upper']), + ('counter.setdefa', ['setdefault']), + ('counter.pop().imag', []), # TODO stubs could make this better + ('counter.keys())[0].uppe', []), + + ('string.upper().uppe', ['upper']), + ('"".upper().uppe', ['upper']), + ] +) +def test_simple_completions(code, completions): + x = [str] + y = {1} + z = {1: str, 2: list} + import collections + deq = collections.deque([1]) + counter = collections.Counter(['asdf']) + string = '' + + defs = jedi.Interpreter(code, [locals()]).complete() + assert [d.name for d in defs] == completions + + +def test__wrapped__(): + from functools import lru_cache + + @lru_cache(maxsize=128) + def syslogs_to_df(): + pass + + c, = jedi.Interpreter('syslogs_to_df', [locals()]).complete() + # Apparently the function starts on the line where the decorator starts. 
+ assert c.line == syslogs_to_df.__wrapped__.__code__.co_firstlineno + 1 + + +def test_illegal_class_instance(): + class X: + __class__ = 1 + X.__name__ = 'asdf' + d, = jedi.Interpreter('foo', [{'foo': X()}]).infer() + v, = d._name.infer() + assert not v.is_instance() + + +@pytest.mark.parametrize('module_name', ['sys', 'time', 'unittest.mock']) +def test_core_module_completes(module_name): + module = import_module(module_name) + assert jedi.Interpreter('module.', [locals()]).complete() + + +@pytest.mark.parametrize( + 'code, expected, index', [ + ('a(', ['a', 'b', 'c'], 0), + ('b(', ['b', 'c'], 0), + # Might or might not be correct, because c is given as a keyword + # argument as well, but that is just what inspect.signature returns. + ('c(', ['b', 'c'], 0), + ] +) +def test_partial_signatures(code, expected, index): + import functools + + def func(a, b, c): + pass + + a = functools.partial(func) + b = functools.partial(func, 1) + c = functools.partial(func, 1, c=2) + + sig, = jedi.Interpreter(code, [locals()]).get_signatures() + assert sig.name == 'partial' + assert [p.name for p in sig.params] == expected + assert index == sig.index + + +def test_type_var(): + """This was an issue before, see Github #1369""" + import typing + x = typing.TypeVar('myvar') + def_, = jedi.Interpreter('x', [locals()]).infer() + assert def_.name == 'TypeVar' + + +@pytest.mark.parametrize('class_is_findable', [False, True]) +def test_param_annotation_completion(class_is_findable): + class Foo: + bar = 3 + + if not class_is_findable: + Foo.__name__ = 'asdf' + + code = 'def CallFoo(x: Foo):\n x.ba' + def_, = jedi.Interpreter(code, [locals()]).complete() + assert def_.name == 'bar' + + +@pytest.mark.parametrize( + 'code, column, expected', [ + ('strs[', 5, ["'asdf'", "'fbar'", "'foo'", Ellipsis]), + ('strs[]', 5, ["'asdf'", "'fbar'", "'foo'", Ellipsis]), + ("strs['", 6, ["asdf'", "fbar'", "foo'"]), + ("strs[']", 6, ["asdf'", "fbar'", "foo'"]), + ('strs["]', 6, ['asdf"', 'fbar"', 'foo"']), 
+ + ('mixed[', 6, [r"'a\\sdf'", '1', '1.1', "b'foo'", Ellipsis]), + ('mixed[1', 7, ['', '.1']), + ('mixed[Non', 9, ['e']), + + ('implicit[10', None, ['00']), + + ('inherited["', None, ['blablu"']), + ] +) +def test_dict_completion(code, column, expected): + strs = {'asdf': 1, """foo""": 2, r'fbar': 3} + mixed = {1: 2, 1.10: 4, None: 6, r'a\sdf': 8, b'foo': 9} + + class Inherited(dict): + pass + inherited = Inherited(blablu=3) + + namespaces = [locals(), {'implicit': {1000: 3}}] + comps = jedi.Interpreter(code, namespaces).complete(column=column) + if Ellipsis in expected: + # This means that global completions are part of this, so filter all of + # that out. + comps = [c for c in comps if not c._name.is_value_name and not c.is_keyword] + expected = [e for e in expected if e is not Ellipsis] + + assert [c.complete for c in comps] == expected + + +@pytest.mark.parametrize( + 'code, types', [ + ('dct[1]', ['int']), + ('dct["asdf"]', ['float']), + ('dct[r"asdf"]', ['float']), + ('dct["a"]', ['float', 'int']), + ] +) +def test_dict_getitem(code, types): + dct = {1: 2, "asdf": 1.0} + + comps = jedi.Interpreter(code, [locals()]).infer() + assert [c.name for c in comps] == types + + +@pytest.mark.parametrize('class_is_findable', [False, True]) +@pytest.mark.parametrize( + 'code, expected', [ + ('DunderCls()[0]', 'int'), + ('dunder[0]', 'int'), + ('next(DunderCls())', 'float'), + ('next(dunder)', 'float'), + ('for x in DunderCls(): x', 'str'), + #('for x in dunder: x', 'str'), + ] +) +def test_dunders(class_is_findable, code, expected): + from typing import Iterator + + class DunderCls: + def __getitem__(self, key) -> int: + pass + + def __iter__(self, key) -> Iterator[str]: + pass + + def __next__(self, key) -> float: + pass + + if not class_is_findable: + DunderCls.__name__ = 'asdf' + + dunder = DunderCls() + + n, = jedi.Interpreter(code, [locals()]).infer() + assert n.name == expected + + +def foo(): + raise KeyError + + +def bar(): + return float + + 
+@pytest.mark.parametrize( + 'annotations, result, code', [ + ({}, [], ''), + (None, [], ''), + ({'asdf': 'str'}, [], ''), + + ({'return': 'str'}, ['str'], ''), + ({'return': 'None'}, ['NoneType'], ''), + ({'return': 'str().upper'}, [], ''), + ({'return': 'foo()'}, [], ''), + ({'return': 'bar()'}, ['float'], ''), + + # typing is available via globals. + ({'return': 'typing.Union[str, int]'}, ['int', 'str'], ''), + ({'return': 'typing.Union["str", int]'}, + ['int', 'str'] if sys.version_info >= (3, 9) else ['int'], ''), + ({'return': 'typing.Union["str", 1]'}, [], ''), + ({'return': 'typing.Optional[str]'}, ['NoneType', 'str'], ''), + ({'return': 'typing.Optional[str, int]'}, [], ''), # Takes only one arg + ({'return': 'typing.Any'}, [], ''), + + ({'return': 'typing.Tuple[int, str]'}, + ['Tuple' if sys.version_info[:2] == (3, 6) else 'tuple'], ''), + ({'return': 'typing.Tuple[int, str]'}, ['int'], 'x()[0]'), + ({'return': 'typing.Tuple[int, str]'}, ['str'], 'x()[1]'), + ({'return': 'typing.Tuple[int, str]'}, [], 'x()[2]'), + + ({'return': 'typing.List'}, ['list'], 'list'), + ({'return': 'typing.List[int]'}, ['list'], 'list'), + ({'return': 'typing.List[int]'}, ['int'], 'x()[0]'), + ({'return': 'typing.List[int, str]'}, [], 'x()[0]'), + + ({'return': 'typing.Iterator[int]'}, [], 'x()[0]'), + ({'return': 'typing.Iterator[int]'}, ['int'], 'next(x())'), + ({'return': 'typing.Iterable[float]'}, ['float'], 'list(x())[0]'), + + ({'return': 'decimal.Decimal'}, [], ''), + ({'return': 'lalalalallalaa'}, [], ''), + ({'return': 'lalalalallalaa.lala'}, [], ''), + ] +) +def test_string_annotation(annotations, result, code): + x = lambda foo: 1 + x.__annotations__ = annotations + defs = jedi.Interpreter(code or 'x()', [locals()]).infer() + assert [d.name for d in defs] == result + + +def test_name_not_inferred_properly(): + """ + In IPython notebook it is typical that some parts of the code that is + provided was already executed. 
In that case if something is not properly + inferred, it should still infer from the variables it already knows. + """ + x = 1 + d, = jedi.Interpreter('x = UNDEFINED; x', [locals()]).infer() + assert d.name == 'int' + + +def test_variable_reuse(): + x = 1 + d, = jedi.Interpreter('y = x\ny', [locals()]).infer() + assert d.name == 'int' + + +def test_negate(): + code = "x = -y" + x, = jedi.Interpreter(code, [{'y': 3}]).infer(1, 0) + assert x.name == 'int' + value, = x._name.infer() + assert value.get_safe_value() == -3 + + +def test_complete_not_findable_class_source(): + class TestClass(): + ta=1 + ta1=2 + + # Simulate the environment where the class is defined in + # an interactive session and therefore inspect module + # cannot find its source code and raises OSError (Py 3.10+) or TypeError. + TestClass.__module__ = "__main__" + # There is a pytest __main__ module we have to remove temporarily. + module = sys.modules.pop("__main__") + try: + interpreter = jedi.Interpreter("TestClass.", [locals()]) + completions = interpreter.complete(column=10, line=1) + finally: + sys.modules["__main__"] = module + + assert "ta" in [c.name for c in completions] + assert "ta1" in [c.name for c in completions] + + +def test_param_infer_default(): + abs_sig, = jedi.Interpreter('abs(', [{'abs': abs}]).get_signatures() + param, = abs_sig.params + assert param.name == 'x' + assert param.infer_default() == [] diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_api/test_keyword.py b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_keyword.py new file mode 100644 index 000000000..efafdfa3a --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_keyword.py @@ -0,0 +1,51 @@ +""" +Test of keywords and ``jedi.keywords`` +""" + + +def test_goto_keyword(Script): + """ + Bug: goto assignments on ``in`` used to raise AttributeError:: + + 'str' object has no attribute 'generate_call_path' + """ + Script('in').goto() + + +def test_keyword(Script, environment): + """ github jedi-vim issue 
#44 """ + defs = Script("print").infer() + assert [d.docstring() for d in defs] + + assert Script("import").goto() == [] + + completions = Script("import").complete(1, 1) + assert len(completions) > 10 and 'if' in [c.name for c in completions] + assert Script("assert").infer() == [] + + +def test_keyword_attributes(Script): + def_, = Script('def').complete() + assert def_.name == 'def' + assert def_.complete == '' + assert def_.is_keyword is True + assert def_.is_stub() is False + assert def_.goto(only_stubs=True) == [] + assert def_.goto() == [] + assert def_.infer() == [] + assert def_.parent() is None + assert def_.docstring() + assert def_.description == 'keyword def' + assert def_.get_line_code() == '' + assert def_.full_name is None + assert def_.line is def_.column is None + assert def_.in_builtin_module() is True + assert def_.module_name == 'builtins' + assert 'typeshed' in def_.module_path.parts + assert def_.type == 'keyword' + + +def test_none_keyword(Script, environment): + none, = Script('None').complete() + assert not none.docstring() + assert none.name == 'None' diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_api/test_names.py b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_names.py new file mode 100644 index 000000000..287a301ea --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_names.py @@ -0,0 +1,197 @@ +""" +Tests for `api.names`. 
+""" + +from textwrap import dedent + +import pytest + + +def _assert_definition_names(definitions, names): + assert [d.name for d in definitions] == names + + +def _check_names(get_names, source, names): + definitions = get_names(dedent(source)) + _assert_definition_names(definitions, names) + return definitions + + +def test_get_definitions_flat(get_names): + _check_names(get_names, """ + import module + class Class: + pass + def func(): + pass + data = None + """, ['module', 'Class', 'func', 'data']) + + +def test_dotted_assignment(get_names): + _check_names(get_names, """ + x = Class() + x.y.z = None + """, ['x', 'z']) # TODO is this behavior what we want? + + +def test_multiple_assignment(get_names): + _check_names(get_names, "x = y = None", ['x', 'y']) + + +def test_multiple_imports(get_names): + _check_names(get_names, """ + from module import a, b + from another_module import * + """, ['a', 'b']) + + +def test_nested_definitions(get_names): + definitions = _check_names(get_names, """ + class Class: + def f(): + pass + def g(): + pass + """, ['Class']) + subdefinitions = definitions[0].defined_names() + _assert_definition_names(subdefinitions, ['f', 'g']) + assert [d.full_name for d in subdefinitions] == ['__main__.Class.f', '__main__.Class.g'] + + +def test_nested_class(get_names): + definitions = _check_names(get_names, """ + class L1: + class L2: + class L3: + def f(): pass + def f(): pass + def f(): pass + def f(): pass + """, ['L1', 'f']) + subdefs = definitions[0].defined_names() + subsubdefs = subdefs[0].defined_names() + _assert_definition_names(subdefs, ['L2', 'f']) + _assert_definition_names(subsubdefs, ['L3', 'f']) + _assert_definition_names(subsubdefs[0].defined_names(), ['f']) + + +def test_class_fields_with_all_scopes_false(get_names): + definitions = _check_names(get_names, """ + from module import f + g = f(f) + class C: + h = g + + def foo(x=a): + bar = x + return bar + """, ['f', 'g', 'C', 'foo']) + C_subdefs = 
definitions[-2].defined_names() + foo_subdefs = definitions[-1].defined_names() + _assert_definition_names(C_subdefs, ['h']) + _assert_definition_names(foo_subdefs, ['x', 'bar']) + + +def test_async_stmt_with_all_scopes_false(get_names): + definitions = _check_names(get_names, """ + from module import f + import asyncio + + g = f(f) + class C: + h = g + def __init__(self): + pass + + async def __aenter__(self): + pass + + def foo(x=a): + bar = x + return bar + + async def async_foo(duration): + async def wait(): + await asyncio.sleep(100) + for i in range(duration//100): + await wait() + return duration//100*100 + + async with C() as cinst: + d = cinst + """, ['f', 'asyncio', 'g', 'C', 'foo', 'async_foo', 'cinst', 'd']) + C_subdefs = definitions[3].defined_names() + foo_subdefs = definitions[4].defined_names() + async_foo_subdefs = definitions[5].defined_names() + cinst_subdefs = definitions[6].defined_names() + _assert_definition_names(C_subdefs, ['h', '__init__', '__aenter__']) + _assert_definition_names(foo_subdefs, ['x', 'bar']) + _assert_definition_names(async_foo_subdefs, ['duration', 'wait', 'i']) + # We treat d as a name outside `async with` block + _assert_definition_names(cinst_subdefs, []) + + +def test_follow_imports(get_names): + # github issue #344 + imp = get_names('import datetime')[0] + assert imp.name == 'datetime' + datetime_names = [str(d.name) for d in imp.defined_names()] + assert 'timedelta' in datetime_names + + +def test_names_twice(get_names): + code = dedent(''' + def lol(): + pass + ''') + + defs = get_names(code) + assert defs[0].defined_names() == [] + + +def test_simple_name(get_names): + defs = get_names('foo', references=True) + assert not defs[0]._name.infer() + + +def test_no_error(get_names): + code = dedent(""" + def foo(a, b): + if a == 10: + if b is None: + print("foo") + a = 20 + """) + func_name, = get_names(code) + a, b, a20 = func_name.defined_names() + assert a.name == 'a' + assert b.name == 'b' + assert a20.name == 'a' + 
assert a20.goto() == [a20] + + +@pytest.mark.parametrize( + 'code, index, is_side_effect', [ + ('x', 0, False), + ('x.x', 0, False), + ('x.x', 1, False), + ('x.x = 3', 0, False), + ('x.x = 3', 1, True), + ('def x(x): x.x = 3', 1, False), + ('def x(x): x.x = 3', 3, True), + ('import sys; sys.path', 0, False), + ('import sys; sys.path', 1, False), + ('import sys; sys.path', 2, False), + ('import sys; sys.path = []', 2, True), + ] +) +def test_is_side_effect(get_names, code, index, is_side_effect): + names = get_names(code, references=True, all_scopes=True) + assert names[index].is_side_effect() == is_side_effect + + +def test_no_defined_names(get_names): + definition, = get_names("x = (1, 2)") + + assert not definition.defined_names() diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_api/test_project.py b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_project.py new file mode 100644 index 000000000..f2a5e9910 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_project.py @@ -0,0 +1,185 @@ +import os +from pathlib import Path + +import pytest + +from ..helpers import get_example_dir, set_cwd, root_dir, test_dir +from jedi import Interpreter +from jedi.api import Project, get_default_project +from jedi.api.project import _is_potential_project, _CONTAINS_POTENTIAL_PROJECT + + +def test_django_default_project(Script): + dir = get_example_dir('django') + + script = Script( + "from app import models\nmodels.SomeMo", + path=os.path.join(dir, 'models/x.py') + ) + c, = script.complete() + assert c.name == "SomeModel" + + project = script._inference_state.project + assert project._django is True + assert project.sys_path is None + assert project.smart_sys_path is True + assert project.load_unsafe_extensions is False + + +def test_django_default_project_of_file(Script): + project = get_default_project(__file__) + assert project._path == Path(__file__).parent.parent.parent + + +def test_interpreter_project_path(): + # Run from anywhere it should be the cwd. 
+ dir = Path(root_dir).joinpath('test') + with set_cwd(dir): + project = Interpreter('', [locals()])._inference_state.project + assert project._path == dir + + +def test_added_sys_path(inference_state): + project = get_default_project() + p = '/some_random_path' + project.added_sys_path = [p] + assert p in project._get_sys_path(inference_state) + + +def test_load_save_project(tmpdir): + project = Project(tmpdir.strpath, added_sys_path=['/foo']) + project.save() + + loaded = Project.load(tmpdir.strpath) + assert loaded.added_sys_path == ['/foo'] + + +@pytest.mark.parametrize( + 'string, full_names, kwargs', [ + ('test_load_save_project', ['test_api.test_project.test_load_save_project'], {}), + ('test_load_savep', [], dict(complete=True)), + ('test_load_save_p', ['test_api.test_project.test_load_save_project'], + dict(complete=True)), + ('test_load_save_p', ['test_api.test_project.test_load_save_project'], + dict(complete=True, all_scopes=True)), + + ('some_search_test_var', [], {}), + ('some_search_test_var', ['test_api.test_project.test_search.some_search_test_var'], + dict(all_scopes=True)), + ('some_search_test_var', ['test_api.test_project.test_search.some_search_test_var'], + dict(complete=True, all_scopes=True)), + # Make sure that the searched name is not part of the file, by + # splitting it up. 
+ ('some_search_test_v' + 'a', ['test_api.test_project.test_search.some_search_test_var'], + dict(complete=True, all_scopes=True)), + + ('sample_int', ['helpers.sample_int'], {}), + ('sample_int', ['helpers.sample_int'], dict(all_scopes=True)), + ('sample_int.real', ['stub:builtins.int.real'], {}), + + ('class sample_int.real', [], {}), + ('foo sample_int.real', [], {}), + ('def sample_int.to_bytes', ['stub:builtins.int.to_bytes'], {}), + ('function sample_int.to_bytes', ['stub:builtins.int.to_bytes'], {}), + ('property sample_int.real', ['stub:builtins.int.real'], {}), + + # With modules + ('test_project.test_search', ['test_api.test_project.test_search'], {}), + ('test_project.test_searc', ['test_api.test_project.test_search'], dict(complete=True)), + ('test_api.test_project.test_search', ['test_api.test_project.test_search'], {}), + ('test_api.test_project.test_sear', ['test_api.test_project.test_search'], + dict(complete=True)), + + # With namespace + ('implicit_namespace_package.ns1.pkg', + ['examples.implicit_namespace_package.ns1.pkg'], {}), + ('implicit_namespace_package.ns1.pkg.ns1_file', + ['examples.implicit_namespace_package.ns1.pkg.ns1_file'], {}), + ('examples.implicit_namespace_package.ns1.pkg.ns1_file', + ['examples.implicit_namespace_package.ns1.pkg.ns1_file'], {}), + ('implicit_namespace_package.ns1.pkg.', + ['examples.implicit_namespace_package.ns1.pkg.ns1_file'], + dict(complete=True)), + ('implicit_namespace_package.', + ['examples.implicit_namespace_package.ns1', + 'examples.implicit_namespace_package.ns2'], + dict(complete=True)), + + # With stubs + ('with_python.module', ['examples.stub_packages.with_python.module'], {}), + ('with_python.modul', ['examples.stub_packages.with_python.module'], + dict(complete=True)), + ('no_python.foo', ['stub:examples.stub_packages.no_python.foo'], {}), + ('no_python.fo', ['stub:examples.stub_packages.no_python.foo'], + dict(complete=True)), + ('with_python-stubs.module', [], {}), + ('no_python-stubs.foo', 
[], {}), + # Both locations are given, because they live in separate folders (one + # suffixed with -stubs. + ('with_python', ['examples.stub_packages.with_python'], {}), + ('no_python', ['stub:examples.stub_packages.no_python'], {}), + # Completion stubs + ('stub_only', ['stub:completion.stub_folder.stub_only', + 'stub:examples.stub_packages.with_python.stub_only'], {}), + ('with_stub', ['completion.stub_folder.with_stub'], {}), + ('with_stub.in_with_stub_both', + ['completion.stub_folder.with_stub.in_with_stub_both'], {}), + ('with_stub.in_with_stub_python', + ['completion.stub_folder.with_stub.in_with_stub_python'], {}), + ('with_stub.in_with_stub_stub', + ['stub:completion.stub_folder.with_stub.in_with_stub_stub'], {}), + # Completion stubs: Folder + ('with_stub_folder', ['completion.stub_folder.with_stub_folder'], {}), + ('with_stub_folder.nested_with_stub', + ['completion.stub_folder.with_stub_folder.nested_with_stub'], {}), + ('nested_with_stub', + ['completion.stub_folder.stub_only_folder.nested_with_stub', + 'completion.stub_folder.with_stub_folder.nested_with_stub'], {}), + + # On sys path + ('sys.path', ['stub:sys.path'], {}), + ('json.dumps', ['json.dumps'], {}), # stdlib + stub + ('multiprocessing', ['multiprocessing'], {}), + ('multiprocessin', ['multiprocessing'], dict(complete=True)), + ] +) +def test_search(string, full_names, kwargs): + some_search_test_var = 1.0 + project = Project(test_dir) + if kwargs.pop('complete', False) is True: + defs = project.complete_search(string, **kwargs) + else: + defs = project.search(string, **kwargs) + assert sorted([('stub:' if d.is_stub() else '') + (d.full_name or d.name) for d in defs]) == full_names + + +@pytest.mark.parametrize( + 'string, completions, all_scopes', [ + ('SomeCl', ['ass'], False), + ('twic', [], False), + ('twic', ['e', 'e'], True), + ('test_load_save_p', ['roject'], False), + ] +) +def test_complete_search(Script, string, completions, all_scopes): + project = Project(test_dir) + defs = 
project.complete_search(string, all_scopes=all_scopes) + assert [d.complete for d in defs] == completions + + +@pytest.mark.parametrize( + 'path,expected', [ + (Path(__file__).parents[2], True), # The path of the project + (Path(__file__).parents[1], False), # The path of the tests, not a project + (Path.home(), None) + ] +) +def test_is_potential_project(path, expected): + + if expected is None: + try: + expected = _CONTAINS_POTENTIAL_PROJECT in os.listdir(path) + except OSError: + expected = False + + assert _is_potential_project(path) == expected diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_api/test_refactoring.py b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_refactoring.py new file mode 100644 index 000000000..c229c1294 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_refactoring.py @@ -0,0 +1,72 @@ +import os +from textwrap import dedent +from pathlib import Path + +import pytest + +import jedi + + +@pytest.fixture() +def dir_with_content(tmpdir): + with open(os.path.join(tmpdir.strpath, 'modx.py'), 'w', newline='') as f: + f.write('import modx\nfoo\n') # self reference + return Path(tmpdir.strpath) + + +def test_rename_mod(Script, dir_with_content): + script = Script( + 'import modx; modx\n', + path=dir_with_content.joinpath('some_script.py'), + project=jedi.Project(dir_with_content), + ) + refactoring = script.rename(line=1, new_name='modr') + refactoring.apply() + + p1 = dir_with_content.joinpath('modx.py') + p2 = dir_with_content.joinpath('modr.py') + expected_code = 'import modr\nfoo\n' + assert not p1.exists() + with open(p2, newline='') as f: + assert f.read() == expected_code + + assert refactoring.get_renames() == [(p1, p2)] + + assert refactoring.get_changed_files()[p1].get_new_code() == expected_code + + assert refactoring.get_diff() == dedent('''\ + rename from modx.py + rename to modr.py + --- modx.py + +++ modr.py + @@ -1,3 +1,3 @@ + -import modx + +import modr + foo + --- some_script.py + +++ some_script.py + @@ 
-1,2 +1,2 @@ + -import modx; modx + +import modr; modr + ''').format(dir=dir_with_content) + + +def test_rename_none_path(Script): + refactoring = Script('foo', path=None).rename(new_name='bar') + with pytest.raises(jedi.RefactoringError, match='on a Script with path=None'): + refactoring.apply() + assert refactoring + + +def test_diff_without_ending_newline(Script): + refactoring = Script('a = 1\nb\na').rename(1, 0, new_name='c') + assert refactoring.get_diff() == dedent('''\ + --- + +++ + @@ -1,3 +1,3 @@ + -a = 1 + +c = 1 + b + -a + +c + ''') diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_api/test_search.py b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_search.py new file mode 100644 index 000000000..20bb7286a --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_search.py @@ -0,0 +1,89 @@ +import os +import sys + +import pytest + + +class SomeClass: + class SomeClass: + def twice(self, a): + something = os + return something + + def twice(self, b): + pass + + def some_function(): + pass + + +@pytest.mark.parametrize( + 'string, descriptions, kwargs', [ + # No completions + ('SomeClass', ['class SomeClass'], {}), + ('SomeClass', ['class SomeClass', 'class SomeClass.SomeClass'], dict(all_scopes=True)), + ('Some', [], dict(all_scopes=True)), + ('os', ['module os'], {}), + ('sys', ['module sys'], {}), + ('sys.path', ['statement sys.path'], {}), + ('sys.exit', ['function sys.exit'], {}), + ('something', [], {}), + ('something', ['statement SomeClass.SomeClass.twice.something'], dict(all_scopes=True)), + + # Completions + ('class Some', ['class SomeClass', 'class SomeClass.SomeClass'], + dict(all_scopes=True, complete=True)), + ('class Some', ['class SomeClass'], dict(complete=True)), + ('Some', ['class SomeClass', 'class SomeClass.SomeClass', + 'statement SomeClass.SomeClass.twice.something', + 'function SomeClass.some_function'], dict(all_scopes=True, complete=True)), + ('some', ['class SomeClass', 'class SomeClass.SomeClass', + 'statement 
SomeClass.SomeClass.twice.something', + 'function SomeClass.some_function'], dict(all_scopes=True, complete=True)), + + # Fuzzy + ('class Smelss', ['class SomeClass'], dict(complete=True, fuzzy=True)), + ('class Smelss', ['class SomeClass', 'class SomeClass.SomeClass'], + dict(complete=True, fuzzy=True, all_scopes=True)), + + # Nested + ('SomeClass.SomeClass', ['class SomeClass.SomeClass'], + dict(all_scopes=True)), + ('SomeClass.SomeClass.twice', ['function SomeClass.SomeClass.twice'], + dict(all_scopes=True)), + ('SomeClass.SomeClass.twice.__call__', ['function types.FunctionType.__call__'], + dict(all_scopes=True)), + ('SomeClass.SomeClass.twice.something', [], dict(all_scopes=True)), + ('SomeClass.twice', ['function SomeClass.twice', 'function SomeClass.SomeClass.twice'], + dict(all_scopes=True)), + + # Nested completions + ('SomeClass.twi', ['function SomeClass.twice', 'function SomeClass.SomeClass.twice'], + dict(all_scopes=True, complete=True)), + + # Fuzzy unfortunately doesn't work + ('SomeCl.twice', [], dict(all_scopes=True, complete=True, fuzzy=True)), + ] +) +def test_simple_search(Script, string, descriptions, kwargs): + if kwargs.pop('complete', False) is True: + defs = Script(path=__file__).complete_search(string, **kwargs) + else: + defs = Script(path=__file__).search(string, **kwargs) + this_mod = 'test.test_api.test_search.' 
+ assert [d.type + ' ' + d.full_name.replace(this_mod, '') for d in defs] == descriptions + + +@pytest.mark.parametrize( + 'string, completions, fuzzy, all_scopes', [ + ('SomeCl', ['ass'], False, False), + ('SomeCl', [None], True, False), + ('twic', [], False, False), + ('some_f', [], False, False), + ('twic', ['e', 'e'], False, True), + ('some_f', ['unction'], False, True), + ] +) +def test_complete_search(Script, string, completions, fuzzy, all_scopes): + defs = Script(path=__file__).complete_search(string, fuzzy=fuzzy, all_scopes=all_scopes) + assert [d.complete for d in defs] == completions diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_api/test_settings.py b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_settings.py new file mode 100644 index 000000000..7924f30cc --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_settings.py @@ -0,0 +1,12 @@ +from jedi import api + + +def test_add_bracket_after_function(monkeypatch, Script): + settings = api.settings + monkeypatch.setattr(settings, 'add_bracket_after_function', True) + script = Script('''\ +def foo(): + pass +foo''') + completions = script.complete() + assert completions[0].complete == '(' diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_api/test_signatures.py b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_signatures.py new file mode 100644 index 000000000..a211c1e3c --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_signatures.py @@ -0,0 +1,71 @@ +import pytest + +_tuple_code = 'from typing import Tuple\ndef f(x: Tuple[int]): ...\nf' + + +@pytest.mark.parametrize( + 'code, expected_params, execute_annotation', [ + ('def f(x: 1, y): ...\nf', [None, None], True), + ('def f(x: 1, y): ...\nf', ['instance int', None], False), + ('def f(x: int): ...\nf', ['instance int'], True), + ('from typing import List\ndef f(x: List[int]): ...\nf', ['instance list'], True), + ('from typing import List\ndef f(x: List[int]): ...\nf', ['class list'], False), + (_tuple_code, ['instance 
tuple'], True), + (_tuple_code, ['class Tuple'], False), + ('x=str\ndef f(p: x): ...\nx=int\nf', ['instance int'], True), + + ('def f(*args, **kwargs): ...\nf', [None, None], False), + ('def f(*args: int, **kwargs: str): ...\nf', ['class int', 'class str'], False), + ] +) +def test_param_annotation(Script, code, expected_params, execute_annotation): + func, = Script(code).goto() + sig, = func.get_signatures() + for p, expected in zip(sig.params, expected_params): + annotations = p.infer_annotation(execute_annotation=execute_annotation) + if expected is None: + assert not annotations + else: + annotation, = annotations + assert annotation.description == expected + + +@pytest.mark.parametrize( + 'code, expected_params', [ + ('def f(x=1, y=int, z): pass\nf', ['instance int', 'class int', None]), + ('def f(*args, **kwargs): pass\nf', [None, None]), + ('x=1\ndef f(p=x): pass\nx=""\nf', ['instance int']), + ] +) +def test_param_default(Script, code, expected_params): + func, = Script(code).goto() + sig, = func.get_signatures() + for p, expected in zip(sig.params, expected_params): + annotations = p.infer_default() + if expected is None: + assert not annotations + else: + annotation, = annotations + assert annotation.description == expected + + +@pytest.mark.parametrize( + 'code, index, param_code, kind', [ + ('def f(x=1): pass\nf', 0, 'x=1', 'POSITIONAL_OR_KEYWORD'), + ('def f(*args:int): pass\nf', 0, '*args: int', 'VAR_POSITIONAL'), + ('def f(**kwargs: List[x]): pass\nf', 0, '**kwargs: List[x]', 'VAR_KEYWORD'), + ('def f(*, x:int=5): pass\nf', 0, 'x: int=5', 'KEYWORD_ONLY'), + ('def f(*args, x): pass\nf', 1, 'x', 'KEYWORD_ONLY'), + ] +) +def test_param_kind_and_name(code, index, param_code, kind, Script): + func, = Script(code).goto() + sig, = func.get_signatures() + param = sig.params[index] + assert param.to_string() == param_code + assert param.kind.name == kind + + +def test_staticmethod(Script): + s, = Script('staticmethod(').get_signatures() + assert s.to_string() 
== 'staticmethod(f: Callable[..., Any])' diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_api/test_syntax_errors.py b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_syntax_errors.py new file mode 100644 index 000000000..f604f1230 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_syntax_errors.py @@ -0,0 +1,55 @@ +""" +These tests test Jedi's Parso usage. Basically there's not a lot of tests here, +because we're just checking if the API works. Bugfixes should be done in parso, +mostly. +""" + +from textwrap import dedent + +import pytest + + +@pytest.mark.parametrize( + 'code, line, column, until_line, until_column, message', [ + ('?\n', 1, 0, 1, 1, 'SyntaxError: invalid syntax'), + ('x %% y', 1, 3, 1, 4, 'SyntaxError: invalid syntax'), + ('"""\n\n', 1, 0, 3, 0, 'SyntaxError: EOF while scanning triple-quoted string literal'), + ('(1, 2\n', 2, 0, 2, 0, 'SyntaxError: invalid syntax'), + ('foo(1, 2\ndef x(): pass', 2, 0, 2, 3, 'SyntaxError: invalid syntax'), + ] +) +def test_simple_syntax_errors(Script, code, line, column, until_line, until_column, message): + e, = Script(code).get_syntax_errors() + assert e.line == line + assert e.column == column + assert e.until_line == until_line + assert e.until_column == until_column + assert e.get_message() == message + + +@pytest.mark.parametrize( + 'code', [ + 'x % y', + 'def x(x): pass', + 'def x(x):\n pass', + ] +) +def test_no_syntax_errors(Script, code): + assert not Script(code).get_syntax_errors() + + +def test_multi_syntax_error(Script): + code = dedent('''\ + def x(): + 1 + def y() + 1 + 1 + 1 *** 3 + ''') + x, y, power = Script(code).get_syntax_errors() + assert x.line == 2 + assert x.column == 0 + assert y.line == 3 + assert y.column == 7 + assert power.line == 5 + assert power.column == 4 diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_api/test_unicode.py b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_unicode.py new file mode 100644 index 000000000..722adb21c --- /dev/null +++ 
b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_unicode.py @@ -0,0 +1,69 @@ +""" +All character set and unicode related tests. +""" +from jedi import Project + + +def test_unicode_script(Script): + s = "import datetime; datetime.timedelta" + completions = Script(s).complete() + assert len(completions) + assert type(completions[0].description) is str + + s = "author='öä'; author" + completions = Script(s).complete() + x = completions[0].description + assert type(x) is str + + s = "#-*- coding: iso-8859-1 -*-\nauthor='öä'; author" + s = s.encode('latin-1') + completions = Script(s).complete() + assert type(completions[0].description) is str + + +def test_unicode_attribute(Script): + """ github jedi-vim issue #94 """ + s1 = ('#-*- coding: utf-8 -*-\nclass Person():\n' + ' name = "e"\n\nPerson().name.') + completions1 = Script(s1).complete() + assert 'strip' in [c.name for c in completions1] + s2 = ('#-*- coding: utf-8 -*-\nclass Person():\n' + ' name = "é"\n\nPerson().name.') + completions2 = Script(s2).complete() + assert 'strip' in [c.name for c in completions2] + + +def test_multibyte_script(Script): + """ `jedi.Script` must accept multi-byte string source. """ + code = "import datetime; datetime.d" + comment = "# multi-byte comment あいうえおä" + s = ('%s\n%s') % (code, comment) + assert len(Script(s).complete(1, len(code))) + + +def test_goto_definition_at_zero(Script): + """Infer at zero sometimes raises issues.""" + assert Script("a").infer(1, 1) == [] + s = Script("str").infer(1, 1) + assert len(s) == 1 + assert list(s)[0].description == 'class str' + assert Script("").infer(1, 0) == [] + + +def test_complete_at_zero(Script): + s = Script("str").complete(1, 3) + assert len(s) == 1 + assert list(s)[0].name == 'str' + + s = Script("").complete(1, 0) + assert len(s) > 0 + + +def test_wrong_encoding(Script, tmpdir): + x = tmpdir.join('x.py') + # Use both latin-1 and utf-8 (a really broken file). 
+ x.write_binary('foobar = 1\nä'.encode('latin-1') + 'ä'.encode('utf-8')) + + project = Project(tmpdir.strpath) + c, = Script('import x; x.foo', project=project).complete() + assert c.name == 'foobar' diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_api/test_usages.py b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_usages.py new file mode 100644 index 000000000..93edd34de --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_api/test_usages.py @@ -0,0 +1,64 @@ +import pytest + +from ..helpers import test_dir + + +def test_import_references(Script): + s = Script("from .. import foo", path=test_dir.joinpath("foo.py")) + assert [usage.line for usage in s.get_references()] == [1] + + +def test_exclude_builtin_modules(Script): + def get(include): + references = Script(source).get_references(include_builtins=include) + return [(d.line, d.column) for d in references] + source = '''import sys\nsys.setprofile''' + places = get(include=True) + assert len(places) >= 3 # Includes stubs, the reference itself and the builtin + + places = get(include=False) + # Just the reference + assert places == [(2, 4)] + + +@pytest.mark.parametrize('code, places', [ + ('', [(1, 7), (4, 6)]), + ('', [(2, 5)]), + ('', [(2, 24), (7, 10), (11, 10)]), + ('', [(6, 4), (14, 0)]), + ('', [(7, 4), (8, 11)]), + ('', [(7, 22), (11, 22)]), + ('', [(11, 4), (12, 11)]), + ('from datetime', [(1, 5)]), + ('''from datetime import datetime +d1 = datetime.now() +d2 = datetime.now() +''', [(2, 14), (3, 14)]), + ('''from datetime import timedelta +t1 = timedelta(seconds=1) +t2 = timedelta(seconds=2) +''', [(2, 15), (3, 15)]) +]) +def test_references_scope(Script, code, places): + if not code: + code = '''import sys +from collections import defaultdict + +print(sys.path) + +def foo(bar): + baz = defaultdict(int) + return baz + +def bar(foo): + baz = defaultdict(int) + return baz + +foo() +''' + from jedi.api.project import Project + project = Project('', sys_path=[], smart_sys_path=False) + script = 
Script(code, project=project) + + for place in places: + assert places == [(n.line, n.column) for n in script.get_references(scope='file', *place)] diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_cache.py b/bundle/jedi-vim/pythonx/jedi/test/test_cache.py new file mode 100644 index 000000000..896ff4516 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_cache.py @@ -0,0 +1,29 @@ +""" +Test all things related to the ``jedi.cache`` module. +""" + + +def test_cache_get_signatures(Script): + """ + See github issue #390. + """ + def check(column, call_name, path=None): + assert Script(s, path=path).get_signatures(1, column)[0].name == call_name + + s = 'str(int())' + + for i in range(3): + check(8, 'int') + check(4, 'str') + # Can keep doing these calls and always get the right result. + + # Now lets specify a source_path of boo and alternate these calls, it + # should still work. + for i in range(3): + check(8, 'int', 'boo') + check(4, 'str', 'boo') + + +def test_cache_line_split_issues(Script): + """Should still work even if there's a newline.""" + assert Script('int(\n').get_signatures()[0].name == 'int' diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_debug.py b/bundle/jedi-vim/pythonx/jedi/test/test_debug.py new file mode 100644 index 000000000..00331072c --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_debug.py @@ -0,0 +1,9 @@ +import jedi +from jedi import debug + +def test_simple(): + jedi.set_debug_function() + debug.speed('foo') + debug.dbg('bar') + debug.warning('baz') + jedi.set_debug_function(None, False, False, False) diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_file_io.py b/bundle/jedi-vim/pythonx/jedi/test/test_file_io.py new file mode 100644 index 000000000..bbf2170b6 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_file_io.py @@ -0,0 +1,27 @@ +from os.path import join +from jedi.file_io import FolderIO +from test.helpers import get_example_dir + + +def test_folder_io_walk(): + root_dir = 
get_example_dir('namespace_package') + iterator = FolderIO(root_dir).walk() + root, folder_ios, file_ios = next(iterator) + assert {f.path for f in folder_ios} == {join(root_dir, 'ns1'), join(root_dir, 'ns2')} + for f in list(folder_ios): + if f.path.endswith('ns1'): + folder_ios.remove(f) + + root, folder_ios, file_ios = next(iterator) + assert folder_ios + assert root.path == join(root_dir, 'ns2') + folder_ios.clear() + assert next(iterator, None) is None + + +def test_folder_io_walk2(): + root_dir = get_example_dir('namespace_package') + iterator = FolderIO(root_dir).walk() + root, folder_ios, file_ios = next(iterator) + folder_ios.clear() + assert next(iterator, None) is None diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_inference/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/test_inference/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_annotations.py b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_annotations.py new file mode 100644 index 000000000..879b2ec5d --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_annotations.py @@ -0,0 +1,55 @@ +from textwrap import dedent + +import pytest + + +def test_simple_annotations(Script, environment): + """ + Annotations only exist in Python 3. 
+ If annotations adhere to PEP-0484, we use them (they override inference), + else they are parsed but ignored + """ + source = dedent("""\ + def annot(a:3): + return a + + annot('')""") + + assert [d.name for d in Script(source).infer()] == ['str'] + + source = dedent("""\ + + def annot_ret(a:3) -> 3: + return a + + annot_ret('')""") + assert [d.name for d in Script(source).infer()] == ['str'] + + source = dedent("""\ + def annot(a:int): + return a + + annot('')""") + + assert [d.name for d in Script(source).infer()] == ['int'] + + +@pytest.mark.parametrize('reference', [ + 'assert 1', + '1', + 'def x(): pass', + '1, 2', + r'1\n' +]) +def test_illegal_forward_references(Script, environment, reference): + source = 'def foo(bar: "%s"): bar' % reference + + assert not Script(source).infer() + + +def test_lambda_forward_references(Script, environment): + source = 'def foo(bar: "lambda: 3"): bar' + + # For now just receiving the 3 is ok. I'm doubting that this is what we + # want. We also execute functions. Should we only execute classes? 
+ assert Script(source).infer() diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_buildout_detection.py b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_buildout_detection.py new file mode 100644 index 000000000..01d56a78b --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_buildout_detection.py @@ -0,0 +1,84 @@ +import os +from textwrap import dedent +from pathlib import Path + +from jedi.inference.sys_path import _get_parent_dir_with_file, \ + _get_buildout_script_paths, check_sys_path_modifications + +from ..helpers import get_example_dir + + +def check_module_test(Script, code): + module_context = Script(code)._get_module_context() + return check_sys_path_modifications(module_context) + + +def test_parent_dir_with_file(Script): + path = Path(get_example_dir('buildout_project', 'src', 'proj_name')) + parent = _get_parent_dir_with_file(path, 'buildout.cfg') + assert parent is not None + assert str(parent).endswith(os.path.join('test', 'examples', 'buildout_project')) + + +def test_buildout_detection(Script): + path = Path(get_example_dir('buildout_project', 'src', 'proj_name')) + paths = list(_get_buildout_script_paths(path.joinpath('module_name.py'))) + assert len(paths) == 1 + appdir_path = os.path.normpath(os.path.join(path, '../../bin/app')) + assert str(paths[0]) == appdir_path + + +def test_append_on_non_sys_path(Script): + code = dedent(""" + class Dummy(object): + path = [] + + d = Dummy() + d.path.append('foo')""") + + paths = check_module_test(Script, code) + assert not paths + assert 'foo' not in paths + + +def test_path_from_invalid_sys_path_assignment(Script): + code = dedent(""" + import sys + sys.path = 'invalid'""") + + paths = check_module_test(Script, code) + assert not paths + assert 'invalid' not in paths + + +def test_sys_path_with_modifications(Script): + path = get_example_dir('buildout_project', 'src', 'proj_name', 'module_name.py') + code = dedent(""" + import os + """) + + paths = 
Script(code, path=path)._inference_state.get_sys_path() + assert os.path.abspath('/tmp/.buildout/eggs/important_package.egg') in paths + + +def test_path_from_sys_path_assignment(Script): + code = dedent(f""" + #!/usr/bin/python + + import sys + sys.path[0:0] = [ + {os.path.abspath('/usr/lib/python3.8/site-packages')!r}, + {os.path.abspath('/home/test/.buildout/eggs/important_package.egg')!r}, + ] + + path[0:0] = [1] + + import important_package + + if __name__ == '__main__': + sys.exit(important_package.main())""") + + paths = check_module_test(Script, code) + assert 1 not in paths + assert os.path.abspath('/home/test/.buildout/eggs/important_package.egg') \ + in map(str, paths) diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_compiled.py b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_compiled.py new file mode 100644 index 000000000..e0315e52c --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_compiled.py @@ -0,0 +1,169 @@ +from textwrap import dedent +import math +from collections import Counter +from datetime import datetime + +import pytest + +from jedi.inference import compiled +from jedi.inference.compiled.access import DirectObjectAccess +from jedi.inference.gradual.conversion import _stub_to_python_value_set +from jedi.inference.syntax_tree import _infer_comparison_part + + +def test_simple(inference_state, environment): + obj = compiled.create_simple_object(inference_state, '_str_') + upper, = obj.py__getattribute__('upper') + objs = list(upper.execute_with_values()) + assert len(objs) == 1 + assert objs[0].name.string_name == 'str' + + +def test_builtin_loading(inference_state): + string, = inference_state.builtins_module.py__getattribute__('str') + from_name, = string.py__getattribute__('__init__') + assert from_name.tree_node + assert not from_name.py__doc__() # It's a stub + + +def test_next_docstr(inference_state): + next_ = compiled.builtin_from_name(inference_state, 'next') + assert next_.tree_node 
is not None + assert next_.py__doc__() == '' # It's a stub + for non_stub in _stub_to_python_value_set(next_): + assert non_stub.py__doc__() == next.__doc__ + + +def test_parse_function_doc_illegal_docstr(): + docstr = """ + test_func(o + + doesn't have a closing bracket. + """ + assert ('', '') == compiled.value._parse_function_doc(docstr) + + +def test_doc(inference_state): + """ + Even CompiledValue docs always return empty docstrings - not None, that's + just a Jedi API definition. + """ + str_ = compiled.create_simple_object(inference_state, '') + # Equals `''.__getnewargs__` + obj, = str_.py__getattribute__('__getnewargs__') + assert obj.py__doc__() == '' + + +def test_string_literals(Script, environment): + def typ(string): + d = Script("a = %s; a" % string).infer()[0] + return d.name + + assert typ('""') == 'str' + assert typ('r""') == 'str' + assert typ('br""') == 'bytes' + assert typ('b""') == 'bytes' + assert typ('u""') == 'str' + + +def test_method_completion(Script, environment): + code = dedent(''' + class Foo: + def bar(self): + pass + + foo = Foo() + foo.bar.__func__''') + assert [c.name for c in Script(code).complete()] == ['__func__'] + + +def test_time_docstring(Script): + import time + comp, = Script('import time\ntime.sleep').complete() + assert comp.docstring(raw=True) == time.sleep.__doc__ + expected = 'sleep(secs: float) -> None\n\n' + time.sleep.__doc__ + assert comp.docstring() == expected + + +def test_dict_values(Script, environment): + assert Script('import sys\nsys.modules["alshdb;lasdhf"]').infer() + + +def test_getitem_on_none(Script): + script = Script('None[1j]') + assert not script.infer() + issue, = script._inference_state.analysis + assert issue.name == 'type-error-not-subscriptable' + + +def _return_int(): + return 1 + + +@pytest.mark.parametrize( + 'attribute, expected_name, expected_parent', [ + ('x', 'int', 'builtins'), + ('y', 'int', 'builtins'), + ('z', 'bool', 'builtins'), + ('cos', 'cos', 'math'), + ('dec', 'Decimal', 
'decimal'), + ('dt', 'datetime', 'datetime'), + ('ret_int', '_return_int', 'test.test_inference.test_compiled'), + ] +) +def test_parent_context(same_process_inference_state, attribute, expected_name, expected_parent): + import decimal + + class C: + x = 1 + y = int + z = True + cos = math.cos + dec = decimal.Decimal(1) + dt = datetime(2000, 1, 1) + ret_int = _return_int + + o = compiled.CompiledValue( + same_process_inference_state, + DirectObjectAccess(same_process_inference_state, C) + ) + x, = o.py__getattribute__(attribute) + assert x.py__name__() == expected_name + module_name = x.parent_context.py__name__() + assert module_name == expected_parent + assert x.parent_context.parent_context is None + + +@pytest.mark.parametrize( + 'obj, expected_names', [ + ('', ['str']), + (str, ['str']), + (''.upper, ['str', 'upper']), + (str.upper, ['str', 'upper']), + + (math.cos, ['cos']), + + (Counter, ['Counter']), + (Counter(""), ['Counter']), + (Counter.most_common, ['Counter', 'most_common']), + (Counter("").most_common, ['Counter', 'most_common']), + ] +) +def test_qualified_names(same_process_inference_state, obj, expected_names): + o = compiled.CompiledValue( + same_process_inference_state, + DirectObjectAccess(same_process_inference_state, obj) + ) + assert o.get_qualified_names() == tuple(expected_names) + + +def test_operation(Script, inference_state, create_compiled_object): + b = create_compiled_object(bool) + false, true = _infer_comparison_part( + inference_state, b.parent_context, + left=list(b.execute_with_values())[0], + operator='is not', + right=b, + ) + assert false.py__name__() == 'bool' + assert true.py__name__() == 'bool' diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_context.py b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_context.py new file mode 100644 index 000000000..843047034 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_context.py @@ -0,0 +1,18 @@ +def test_module_attributes(Script): + 
def_, = Script('__name__').complete() + assert def_.name == '__name__' + assert def_.line is None + assert def_.column is None + str_, = def_.infer() + assert str_.name == 'str' + + +def test_module__file__(Script, environment): + assert not Script('__file__').infer() + def_, = Script('__file__', path='example.py').infer() + value = def_._name._value.get_safe_value() + assert value.endswith('example.py') + + def_, = Script('import antigravity; antigravity.__file__').infer() + value = def_._name._value.get_safe_value() + assert value.endswith('.pyi') diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_docstring.py b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_docstring.py new file mode 100644 index 000000000..85e339337 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_docstring.py @@ -0,0 +1,541 @@ +""" +Testing of docstring related issues and especially ``jedi.docstrings``. +""" + +import os +from textwrap import dedent + +import pytest + +import jedi +from ..helpers import test_dir + +try: + import numpydoc # NOQA +except ImportError: + numpydoc_unavailable = True +else: + numpydoc_unavailable = False + +try: + import numpy # NOQA +except ImportError: + numpy_unavailable = True +else: + numpy_unavailable = False + + +def test_function_doc(Script): + defs = Script(""" + def func(): + '''Docstring of `func`.''' + func""").infer() + assert defs[0].docstring() == 'func()\n\nDocstring of `func`.' + + +def test_class_doc(Script): + defs = Script(""" + class TestClass(): + '''Docstring of `TestClass`.''' + TestClass""").infer() + + expected = 'Docstring of `TestClass`.' 
+ assert defs[0].docstring(raw=True) == expected + assert defs[0].docstring() == 'TestClass()\n\n' + expected + + +def test_class_doc_with_init(Script): + d, = Script(""" + class TestClass(): + '''Docstring''' + def __init__(self, foo, bar=3): pass + TestClass""").infer() + + assert d.docstring() == 'TestClass(foo, bar=3)\n\nDocstring' + + +def test_instance_doc(Script): + defs = Script(""" + class TestClass(): + '''Docstring of `TestClass`.''' + tc = TestClass() + tc""").infer() + assert defs[0].docstring() == 'Docstring of `TestClass`.' + + +def test_multiple_docstrings(Script): + d, = Script(""" + def func(): + '''Original docstring.''' + x = func + '''Docstring of `x`.''' + x""").help() + assert d.docstring() == 'Docstring of `x`.' + + +def test_completion(Script): + assert not Script(''' + class DocstringCompletion(): + #? [] + """ asdfas """''').complete() + + +def test_docstrings_type_dotted_import(Script): + s = """ + def func(arg): + ''' + :type arg: random.Random + ''' + arg.""" + names = [c.name for c in Script(s).complete()] + assert 'seed' in names + + +def test_docstrings_param_type(Script): + s = """ + def func(arg): + ''' + :param str arg: some description + ''' + arg.""" + names = [c.name for c in Script(s).complete()] + assert 'join' in names + + +def test_docstrings_type_str(Script): + s = """ + def func(arg): + ''' + :type arg: str + ''' + arg.""" + + names = [c.name for c in Script(s).complete()] + assert 'join' in names + + +def test_docstring_instance(Script): + # The types hint that it's a certain kind + s = dedent(""" + class A: + def __init__(self,a): + ''' + :type a: threading.Thread + ''' + + if a is not None: + a.start() + + self.a = a + + + def method_b(c): + ''' + :type c: A + ''' + + c.""") + + names = [c.name for c in Script(s).complete()] + assert 'a' in names + assert '__init__' in names + assert 'mro' not in names # Exists only for types. 
+ + +def test_docstring_keyword(Script): + completions = Script('assert').complete() + assert 'assert' in completions[0].docstring() + + +def test_docstring_params_formatting(Script): + defs = Script(""" + def func(param1, + param2, + param3): + pass + func""").infer() + assert defs[0].docstring() == 'func(param1, param2, param3)' + + +def test_import_function_docstring(Script): + code = "from stub_folder import with_stub; with_stub.stub_function" + path = os.path.join(test_dir, 'completion', 'import_function_docstring.py') + c, = Script(code, path=path).complete() + + doc = 'stub_function(x: int, y: float) -> str\n\nPython docstring' + assert c.docstring() == doc + assert c.type == 'function' + func, = c.goto(prefer_stubs=True) + assert func.docstring() == doc + func, = c.goto() + assert func.docstring() == doc + + +# ---- Numpy Style Tests --- + +@pytest.mark.skipif(numpydoc_unavailable, + reason='numpydoc module is unavailable') +def test_numpydoc_parameters(): + s = dedent(''' + def foobar(x, y): + """ + Parameters + ---------- + x : int + y : str + """ + y.''') + names = [c.name for c in jedi.Script(s).complete()] + assert 'isupper' in names + assert 'capitalize' in names + + +@pytest.mark.skipif(numpydoc_unavailable, + reason='numpydoc module is unavailable') +def test_numpydoc_parameters_set_of_values(): + s = dedent(''' + def foobar(x, y): + """ + Parameters + ---------- + x : {'foo', 'bar', 100500}, optional + """ + x.''') + names = [c.name for c in jedi.Script(s).complete()] + assert 'isupper' in names + assert 'capitalize' in names + assert 'numerator' in names + +@pytest.mark.skipif(numpydoc_unavailable, + reason='numpydoc module is unavailable') +def test_numpydoc_parameters_set_single_value(): + """ + This is found in numpy masked-array I'm not too sure what this means but should not crash + """ + s = dedent(''' + def foobar(x, y): + """ + Parameters + ---------- + x : {var}, optional + """ + x.''') + names = [c.name for c in 
jedi.Script(s).complete()] + # just don't crash + assert names == [] + + +@pytest.mark.skipif(numpydoc_unavailable, + reason='numpydoc module is unavailable') +def test_numpydoc_parameters_alternative_types(): + s = dedent(''' + def foobar(x, y): + """ + Parameters + ---------- + x : int or str or list + """ + x.''') + names = [c.name for c in jedi.Script(s).complete()] + assert 'isupper' in names + assert 'capitalize' in names + assert 'numerator' in names + assert 'append' in names + + +@pytest.mark.skipif(numpydoc_unavailable, + reason='numpydoc module is unavailable') +def test_numpydoc_invalid(): + s = dedent(''' + def foobar(x, y): + """ + Parameters + ---------- + x : int (str, py.path.local + """ + x.''') + + assert not jedi.Script(s).complete() + + +@pytest.mark.skipif(numpydoc_unavailable, + reason='numpydoc module is unavailable') +def test_numpydoc_returns(): + s = dedent(''' + def foobar(): + """ + Returns + ---------- + x : int + y : str + """ + return x + + def bazbiz(): + z = foobar() + z.''') + names = [c.name for c in jedi.Script(s).complete()] + assert 'isupper' in names + assert 'capitalize' in names + assert 'numerator' in names + + +@pytest.mark.skipif(numpydoc_unavailable, + reason='numpydoc module is unavailable') +def test_numpydoc_returns_set_of_values(): + s = dedent(''' + def foobar(): + """ + Returns + ---------- + x : {'foo', 'bar', 100500} + """ + return x + + def bazbiz(): + z = foobar() + z.''') + names = [c.name for c in jedi.Script(s).complete()] + assert 'isupper' in names + assert 'capitalize' in names + assert 'numerator' in names + + +@pytest.mark.skipif(numpydoc_unavailable, + reason='numpydoc module is unavailable') +def test_numpydoc_returns_alternative_types(): + s = dedent(''' + def foobar(): + """ + Returns + ---------- + int or list of str + """ + return x + + def bazbiz(): + z = foobar() + z.''') + names = [c.name for c in jedi.Script(s).complete()] + assert 'isupper' not in names + assert 'capitalize' not in names + 
assert 'numerator' in names + assert 'append' in names + + +@pytest.mark.skipif(numpydoc_unavailable, + reason='numpydoc module is unavailable') +def test_numpydoc_returns_list_of(): + s = dedent(''' + def foobar(): + """ + Returns + ---------- + list of str + """ + return x + + def bazbiz(): + z = foobar() + z.''') + names = [c.name for c in jedi.Script(s).complete()] + assert 'append' in names + assert 'isupper' not in names + assert 'capitalize' not in names + + +@pytest.mark.skipif(numpydoc_unavailable, + reason='numpydoc module is unavailable') +def test_numpydoc_returns_obj(): + s = dedent(''' + def foobar(x, y): + """ + Returns + ---------- + int or random.Random + """ + return x + y + + def bazbiz(): + z = foobar(x, y) + z.''') + script = jedi.Script(s) + names = [c.name for c in script.complete()] + assert 'numerator' in names + assert 'seed' in names + + +@pytest.mark.skipif(numpydoc_unavailable, + reason='numpydoc module is unavailable') +def test_numpydoc_yields(): + s = dedent(''' + def foobar(): + """ + Yields + ---------- + x : int + y : str + """ + return x + + def bazbiz(): + z = foobar(): + z.''') + names = [c.name for c in jedi.Script(s).complete()] + assert 'isupper' in names + assert 'capitalize' in names + assert 'numerator' in names + + +@pytest.mark.skipif(numpydoc_unavailable or numpy_unavailable, + reason='numpydoc or numpy module is unavailable') +def test_numpy_returns(): + s = dedent(''' + import numpy + x = numpy.asarray([]) + x.d''' + ) + names = [c.name for c in jedi.Script(s).complete()] + assert 'diagonal' in names + + +@pytest.mark.skipif(numpydoc_unavailable or numpy_unavailable, + reason='numpydoc or numpy module is unavailable') +def test_numpy_comp_returns(): + s = dedent(''' + import numpy + x = numpy.array([]) + x.d''' + ) + names = [c.name for c in jedi.Script(s).complete()] + assert 'diagonal' in names + + +def test_decorator(Script): + code = dedent(''' + def decorator(name=None): + def _decorate(func): + @wraps(func) + 
def wrapper(*args, **kwargs): + """wrapper docstring""" + return func(*args, **kwargs) + return wrapper + return _decorate + + + @decorator('testing') + def check_user(f): + """Nice docstring""" + pass + + check_user''') + + d, = Script(code).infer() + assert d.docstring(raw=True) == 'Nice docstring' + + +def test_method_decorator(Script): + code = dedent(''' + def decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + """wrapper docstring""" + return func(*args, **kwargs) + return wrapper + + class Foo(): + @decorator + def check_user(self, f): + """Nice docstring""" + pass + + Foo().check_user''') + + d, = Script(code).infer() + assert d.docstring() == 'wrapper(f)\n\nNice docstring' + + +def test_partial(Script): + code = dedent(''' + def foo(): + 'x y z' + from functools import partial + x = partial(foo) + x''') + + for p in Script(code).infer(): + assert p.docstring(raw=True) == 'x y z' + + +def test_basic_str_init_signature(Script, disable_typeshed): + # See GH #1414 and GH #1426 + code = dedent(''' + class Foo(str): + pass + Foo(''') + c, = Script(code).get_signatures() + assert c.name == 'Foo' + + +def test_doctest_result_completion(Script): + code = '''\ + """ + comment + + >>> something = 3 + somethi + """ + something_else = 8 + ''' + c1, c2 = Script(code).complete(line=5) + assert c1.complete == 'ng' + assert c2.complete == 'ng_else' + + +def test_doctest_function_start(Script): + code = dedent('''\ + def test(a, b): + """ + From GH #1585 + + >>> a = {} + >>> b = {} + >>> get_remainder(a, b) == { + ... "foo": 10, "bar": 7 + ... 
} + """ + return + ''') + assert Script(code).complete(7, 8) + + +@pytest.mark.parametrize( + "name, docstring", [ + ('prop1', 'Returns prop1.'), + ('prop2', 'Returns None or ...'), + ('prop3', 'Non-sense property.'), + ('prop4', 'Django like property'), + ] +) +def test_property(name, docstring, goto_or_complete): + code = dedent(''' + from typing import Optional + class Test: + @property + def prop1(self) -> int: + """Returns prop1.""" + + @property + def prop2(self) -> Optional[int]: + """Returns None or ...""" + + @property + def prop3(self) -> None: + """Non-sense property.""" + + @cached_property # Not imported, but Jedi uses a heuristic + def prop4(self) -> None: + """Django like property""" + ''') + n, = goto_or_complete(code + 'Test().' + name) + assert n.docstring() == docstring diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_extension.py b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_extension.py new file mode 100644 index 000000000..85cd7c012 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_extension.py @@ -0,0 +1,73 @@ +""" +Test compiled module +""" +import os + +import jedi +from ..helpers import get_example_dir +import pytest + + +def test_completions(Script): + s = Script('import _ctypes; _ctypes.') + assert len(s.complete()) >= 15 + + +def test_get_signatures_extension(Script, environment): + if os.name == 'nt': + func = 'LoadLibrary' + else: + func = 'dlopen' + s = Script('import _ctypes; _ctypes.%s(' % (func,)) + sigs = s.get_signatures() + assert len(sigs) == 1 + assert len(sigs[0].params) in (1, 2) + + +def test_get_signatures_stdlib(Script): + s = Script('import math; math.cos(') + sigs = s.get_signatures() + assert len(sigs) == 1 + assert len(sigs[0].params) == 1 + + +# Check only on linux 64 bit platform and Python3.8. 
+@pytest.mark.parametrize('load_unsafe_extensions', [False, True]) +@pytest.mark.skipif('sys.platform != "linux" or sys.maxsize <= 2**32 or sys.version_info[:2] != (3, 8)') +def test_init_extension_module(Script, load_unsafe_extensions): + """ + ``__init__`` extension modules are also packages and Jedi should understand + that. + + Originally coming from #472. + + This test was built by the module.c and setup.py combination you can find + in the init_extension_module folder. You can easily build the + `__init__.cpython-38m.so` by compiling it (create a virtualenv and run + `setup.py install`. + + This is also why this test only runs on certain systems and Python 3.8. + """ + + project = jedi.Project(get_example_dir(), load_unsafe_extensions=load_unsafe_extensions) + s = jedi.Script( + 'import init_extension_module as i\ni.', + path='not_existing.py', + project=project, + ) + if load_unsafe_extensions: + assert 'foo' in [c.name for c in s.complete()] + else: + assert 'foo' not in [c.name for c in s.complete()] + + s = jedi.Script( + 'from init_extension_module import foo\nfoo', + path='not_existing.py', + project=project, + ) + c, = s.complete() + assert c.name == 'foo' + if load_unsafe_extensions: + assert c.infer() + else: + assert not c.infer() diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_fstring.py b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_fstring.py new file mode 100644 index 000000000..ed3030901 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_fstring.py @@ -0,0 +1,18 @@ +import pytest +from textwrap import dedent + + +@pytest.fixture(autouse=True) +def skip_not_supported(environment): + if environment.version_info < (3, 6): + pytest.skip() + + +def test_fstring_multiline(Script): + code = dedent("""\ + '' f'''s{ + str.uppe + ''' + """) + c, = Script(code).complete(line=2, column=9) + assert c.name == 'upper' diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_gradual/__init__.py 
b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_gradual/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_gradual/test_conversion.py b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_gradual/test_conversion.py new file mode 100644 index 000000000..ea9ea0135 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_gradual/test_conversion.py @@ -0,0 +1,85 @@ +import os +from parso.cache import parser_cache + +from test.helpers import root_dir +from jedi.api.project import Project +from jedi.inference.gradual.conversion import convert_names + + +def test_sqlite3_conversion(Script): + script1 = Script('import sqlite3; sqlite3.Connection') + d, = script1.infer() + + assert not d.module_path + assert d.full_name == 'sqlite3.Connection' + assert convert_names([d._name], only_stubs=True) + + d, = script1.infer(only_stubs=True) + assert d.is_stub() + assert d.full_name == 'sqlite3.dbapi2.Connection' + + script2 = Script(path=d.module_path) + d, = script2.infer(line=d.line, column=d.column) + assert not d.is_stub() + assert d.full_name == 'sqlite3.Connection' + v, = d._name.infer() + assert v.is_compiled() + + +def test_conversion_of_stub_only(Script): + project = Project(os.path.join(root_dir, 'test', 'completion', 'stub_folder')) + code = 'import stub_only; stub_only.in_stub_only' + d1, = Script(code, project=project).goto() + assert d1.is_stub() + + script = Script(path=d1.module_path, project=project) + d2, = script.goto(line=d1.line, column=d1.column) + assert d2.is_stub() + assert d2.module_path == d1.module_path + assert d2.line == d1.line + assert d2.column == d1.column + assert d2.name == 'in_stub_only' + + +def test_goto_on_file(Script): + project = Project(os.path.join(root_dir, 'test', 'completion', 'stub_folder')) + script = Script('import stub_only; stub_only.Foo', project=project) + d1, = script.goto() + v, = d1._name.infer() + foo, bar, obj = 
v.py__mro__() + assert foo.py__name__() == 'Foo' + assert bar.py__name__() == 'Bar' + assert obj.py__name__() == 'object' + + # Make sure we go to Bar, because Foo is a bit before: `class Foo(Bar):` + script = Script(path=d1.module_path, project=project) + d2, = script.goto(line=d1.line, column=d1.column + 4) + assert d2.name == 'Bar' + + +def test_goto_import(Script): + code = 'from abc import ABC; ABC' + d, = Script(code).goto(only_stubs=True) + assert d.is_stub() + d, = Script(code).goto() + assert not d.is_stub() + + +def test_stub_get_line_code(Script): + code = 'from abc import ABC; ABC' + script = Script(code) + d, = script.goto(only_stubs=True) + # Replace \r for tests on Windows + assert d.get_line_code().replace('\r', '') == 'class ABC(metaclass=ABCMeta): ...\n' + del parser_cache[script._inference_state.latest_grammar._hashed][d.module_path] + d, = Script(path=d.module_path).goto(d.line, d.column, only_stubs=True) + assert d.is_stub() + assert d.get_line_code().replace('\r', '') == 'class ABC(metaclass=ABCMeta): ...\n' + + +def test_os_stat_result(Script): + d, = Script('import os; os.stat_result').goto() + assert d.is_stub() + n = d._name + # This should not be a different stub name + assert convert_names([n]) == [n] diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_gradual/test_stub_loading.py b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_gradual/test_stub_loading.py new file mode 100644 index 000000000..ab64f00c6 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_gradual/test_stub_loading.py @@ -0,0 +1,64 @@ +from functools import partial +from test.helpers import get_example_dir +from jedi.api.project import Project + +import pytest + + +@pytest.fixture +def ScriptInStubFolder(Script): + path = get_example_dir('stub_packages') + project = Project(path, sys_path=[path], smart_sys_path=False) + return partial(Script, project=project) + + +@pytest.mark.parametrize( + ('code', 'expected'), [ + ('from 
no_python import foo', ['int']), + ('from with_python import stub_only', ['str']), + ('from with_python import python_only', ['int']), + ('from with_python import both', ['int']), + ('from with_python import something_random', []), + ('from with_python.module import in_sub_module', ['int']), + ] +) +def test_find_stubs_infer(ScriptInStubFolder, code, expected): + defs = ScriptInStubFolder(code).infer() + assert [d.name for d in defs] == expected + + +func_without_stub_doc = 'func_without_stub(a)\n\nnostubdoc' +func_with_stub_doc = 'func_with_stub(b: int) -> float\n\nwithstubdoc' + + +@pytest.mark.parametrize( + ('code', 'expected'), [ + ('from with_python import stub_only', ''), + ('from with_python import python_only', ''), + ('from with_python import both', ''), + + ('import with_python; with_python.func_without_stub', ''), + ('import with_python.module; with_python.module.func_without_stub', func_without_stub_doc), + ('from with_python import module; module.func_without_stub', func_without_stub_doc), + ('from with_python.module import func_without_stub', func_without_stub_doc), + ('from with_python.module import func_without_stub as f; f', func_without_stub_doc), + ('from with_python.module import func_without_stub; func_without_stub', + func_without_stub_doc), + ('from with_python import func_without_stub', ''), + ('from with_python import func_without_stub as f; f', ''), + ('from with_python import func_without_stub; func_without_stub', ''), + + ('import with_python; with_python.func_with_stub', func_with_stub_doc), + ('import with_python.module; with_python.module.func_with_stub', func_with_stub_doc), + ('from with_python import module; module.func_with_stub', func_with_stub_doc), + ('from with_python.module import func_with_stub', func_with_stub_doc), + ('from with_python.module import func_with_stub as f; f', func_with_stub_doc), + ('from with_python.module import func_with_stub; func_with_stub', func_with_stub_doc), + ('from with_python import 
func_with_stub', func_with_stub_doc), + ('from with_python import func_with_stub as f; f', func_with_stub_doc), + ('from with_python import func_with_stub; func_with_stub', func_with_stub_doc), + ] +) +def test_docstrings(ScriptInStubFolder, code, expected): + d, = ScriptInStubFolder(code).help() + assert d.docstring() == expected diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_gradual/test_stubs.py b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_gradual/test_stubs.py new file mode 100644 index 000000000..b3a042c7a --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_gradual/test_stubs.py @@ -0,0 +1,90 @@ +import os + +import pytest + +from jedi.api.project import Project +from test.helpers import root_dir + + +@pytest.mark.parametrize('type_', ['goto', 'infer']) +@pytest.mark.parametrize('way', ['direct', 'indirect']) +@pytest.mark.parametrize( + 'kwargs', [ + dict(only_stubs=False, prefer_stubs=False), + dict(only_stubs=False, prefer_stubs=True), + dict(only_stubs=True, prefer_stubs=False), + ] +) +@pytest.mark.parametrize( + ('code', 'full_name', 'has_stub', 'has_python', 'options'), [ + ['import os; os.walk', 'os.walk', True, True, {}], + ['from collections import Counter', 'collections.Counter', True, True, {}], + ['from collections', 'collections', True, True, {}], + ['from collections import Counter; Counter', 'collections.Counter', True, True, {}], + ['from collections import Counter; Counter()', 'collections.Counter', True, True, {}], + ['from collections import Counter; Counter.most_common', + 'collections.Counter.most_common', True, True, {}], + ['from collections import deque', 'collections.deque', True, False, + {'goto_has_python': True}], + + ['from keyword import kwlist; kwlist', 'typing.Sequence', True, True, + {'goto_full_name': 'keyword.kwlist'}], + ['from keyword import kwlist', 'typing.Sequence', True, True, + {'goto_full_name': 'keyword.kwlist'}], + + ['from socket import AF_INET', 
'socket.AddressFamily', True, False, + {'goto_full_name': 'socket.AF_INET'}], + ['from socket import socket', 'socket.socket', True, True, {}], + + ['import with_stub', 'with_stub', True, True, {}], + ['import with_stub', 'with_stub', True, True, {}], + ['import with_stub_folder.python_only', 'with_stub_folder.python_only', False, True, {}], + ['import stub_only', 'stub_only', True, False, {}], + ]) +def test_infer_and_goto(Script, code, full_name, has_stub, has_python, way, + kwargs, type_, options, environment): + if type_ == 'infer' and full_name == 'typing.Sequence' and environment.version_info >= (3, 7): + # In Python 3.7+ there's not really a sequence definition, there's just + # a name that leads nowhere. + has_python = False + + project = Project(os.path.join(root_dir, 'test', 'completion', 'stub_folder')) + s = Script(code, project=project) + prefer_stubs = kwargs['prefer_stubs'] + only_stubs = kwargs['only_stubs'] + + if type_ == 'goto': + full_name = options.get('goto_full_name', full_name) + has_python = options.get('goto_has_python', has_python) + + if way == 'direct': + if type_ == 'goto': + defs = s.goto(follow_imports=True, **kwargs) + else: + defs = s.infer(**kwargs) + else: + goto_defs = s.goto( + # Prefering stubs when we want to go to python and vice versa + prefer_stubs=not (prefer_stubs or only_stubs), + follow_imports=True, + ) + if type_ == 'goto': + defs = [d for goto_def in goto_defs for d in goto_def.goto(**kwargs)] + else: + defs = [d for goto_def in goto_defs for d in goto_def.infer(**kwargs)] + + if not has_stub and only_stubs: + assert not defs + else: + assert defs + + for d in defs: + if prefer_stubs and has_stub: + assert d.is_stub() + elif only_stubs: + assert d.is_stub() + else: + assert has_python == (not d.is_stub()) + assert d.full_name == full_name + + assert d.is_stub() == (d.module_path.suffix == '.pyi') diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_gradual/test_typeshed.py 
b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_gradual/test_typeshed.py new file mode 100644 index 000000000..ac3f17274 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_gradual/test_typeshed.py @@ -0,0 +1,242 @@ +import os + +import pytest +from parso.utils import PythonVersionInfo + +from jedi.inference.gradual import typeshed +from jedi.inference.value import TreeInstance, BoundMethod, FunctionValue, \ + MethodValue, ClassValue +from jedi.inference.names import StubName + +TYPESHED_PYTHON3 = os.path.join(typeshed.TYPESHED_PATH, 'stdlib', '3') + + +def test_get_typeshed_directories(): + def get_dirs(version_info): + return { + p.path.replace(str(typeshed.TYPESHED_PATH), '').lstrip(os.path.sep) + for p in typeshed._get_typeshed_directories(version_info) + } + + def transform(set_): + return {x.replace('/', os.path.sep) for x in set_} + + dirs = get_dirs(PythonVersionInfo(3, 7)) + assert dirs == transform({'stdlib/2and3', 'stdlib/3', 'stdlib/3.7', + 'third_party/2and3', + 'third_party/3', 'third_party/3.7'}) + + +def test_get_stub_files(): + map_ = typeshed._create_stub_map(typeshed.PathInfo(TYPESHED_PYTHON3, is_third_party=False)) + assert map_['functools'].path == os.path.join(TYPESHED_PYTHON3, 'functools.pyi') + + +def test_function(Script, environment): + code = 'import threading; threading.current_thread' + def_, = Script(code).infer() + value = def_._name._value + assert isinstance(value, FunctionValue), value + + def_, = Script(code + '()').infer() + value = def_._name._value + assert isinstance(value, TreeInstance) + + def_, = Script('import threading; threading.Thread').infer() + assert isinstance(def_._name._value, ClassValue), def_ + + +def test_keywords_variable(Script): + code = 'import keyword; keyword.kwlist' + for seq in Script(code).infer(): + assert seq.name == 'Sequence' + # This points towards the typeshed implementation + stub_seq, = seq.goto(only_stubs=True) + assert 
str(stub_seq.module_path).startswith(str(typeshed.TYPESHED_PATH)) + + +def test_class(Script): + def_, = Script('import threading; threading.Thread').infer() + value = def_._name._value + assert isinstance(value, ClassValue), value + + +def test_instance(Script): + def_, = Script('import threading; threading.Thread()').infer() + value = def_._name._value + assert isinstance(value, TreeInstance) + + +def test_class_function(Script): + def_, = Script('import threading; threading.Thread.getName').infer() + value = def_._name._value + assert isinstance(value, MethodValue), value + + +def test_method(Script): + code = 'import threading; threading.Thread().getName' + def_, = Script(code).infer() + value = def_._name._value + assert isinstance(value, BoundMethod), value + assert isinstance(value._wrapped_value, MethodValue), value + + def_, = Script(code + '()').infer() + value = def_._name._value + assert isinstance(value, TreeInstance) + assert value.class_value.py__name__() == 'str' + + +def test_sys_exc_info(Script): + code = 'import sys; sys.exc_info()' + none, def_ = Script(code + '[1]').infer() + # It's an optional. + assert def_.name == 'BaseException' + assert def_.module_path == typeshed.TYPESHED_PATH.joinpath( + 'stdlib', '3', 'builtins.pyi' + ) + assert def_.type == 'instance' + assert none.name == 'NoneType' + assert none.module_path is None + + none, def_ = Script(code + '[0]').infer() + assert def_.name == 'BaseException' + assert def_.type == 'class' + + +def test_sys_getwindowsversion(Script, environment): + # This should only exist on Windows, but type inference should happen + # everywhere. 
+ definitions = Script('import sys; sys.getwindowsversion().major').infer() + def_, = definitions + assert def_.name == 'int' + + +def test_sys_hexversion(Script): + script = Script('import sys; sys.hexversion') + def_, = script.complete() + assert isinstance(def_._name, StubName), def_._name + assert str(def_.module_path).startswith(str(typeshed.TYPESHED_PATH)) + def_, = script.infer() + assert def_.name == 'int' + + +def test_math(Script): + def_, = Script('import math; math.acos()').infer() + assert def_.name == 'float' + value = def_._name._value + assert value + + +def test_type_var(Script): + def_, = Script('import typing; T = typing.TypeVar("T1")').infer() + assert def_.name == 'TypeVar' + assert def_.description == 'class TypeVar' + + +@pytest.mark.parametrize( + 'code, full_name', ( + ('import math', 'math'), + ('from math import cos', 'math.cos') + ) +) +def test_math_is_stub(Script, code, full_name): + s = Script(code) + cos, = s.infer() + wanted = ('typeshed', 'stdlib', '2and3', 'math.pyi') + assert cos.module_path.parts[-4:] == wanted + assert cos.is_stub() is True + assert cos.goto(only_stubs=True) == [cos] + assert cos.full_name == full_name + + cos, = s.goto() + assert cos.module_path.parts[-4:] == wanted + assert cos.goto(only_stubs=True) == [cos] + assert cos.is_stub() is True + assert cos.full_name == full_name + + +def test_goto_stubs(Script): + s = Script('import os; os') + os_module, = s.infer() + assert os_module.full_name == 'os' + assert os_module.is_stub() is False + stub, = os_module.goto(only_stubs=True) + assert stub.is_stub() is True + + os_module, = s.goto() + + +def _assert_is_same(d1, d2): + assert d1.name == d2.name + assert d1.module_path == d2.module_path + assert d1.line == d2.line + assert d1.column == d2.column + + +@pytest.mark.parametrize('type_', ['goto', 'infer']) +@pytest.mark.parametrize( + 'code', [ + 'import os; os.walk', + 'from collections import Counter; Counter', + 'from collections import Counter; Counter()', + 
'from collections import Counter; Counter.most_common', + 'from collections import Counter; Counter().most_common', + ]) +def test_goto_stubs_on_itself(Script, code, type_): + """ + If goto_stubs is used on an identifier in e.g. the stdlib, we should goto + the stub of it. + """ + s = Script(code) + if type_ == 'infer': + def_, = s.infer() + else: + def_, = s.goto(follow_imports=True) + stub, = def_.goto(only_stubs=True) + + script_on_source = Script(path=def_.module_path) + if type_ == 'infer': + definition, = script_on_source.infer(def_.line, def_.column) + else: + definition, = script_on_source.goto(def_.line, def_.column) + same_stub, = definition.goto(only_stubs=True) + _assert_is_same(same_stub, stub) + _assert_is_same(definition, def_) + assert same_stub.module_path != def_.module_path + + # And the reverse. + script_on_stub = Script( + path=same_stub.module_path, + ) + + if type_ == 'infer': + same_definition, = script_on_stub.infer(same_stub.line, same_stub.column) + same_definition2, = same_stub.infer() + else: + same_definition, = script_on_stub.goto(same_stub.line, same_stub.column) + same_definition2, = same_stub.goto() + + _assert_is_same(same_definition, definition) + _assert_is_same(same_definition, same_definition2) + + +def test_module_exists_only_as_stub(Script): + try: + import redis + except ImportError: + pass + else: + pytest.skip('redis is already installed, it should only exist as a stub for this test') + redis_path = os.path.join(typeshed.TYPESHED_PATH, 'third_party', '2and3', 'redis') + assert os.path.isdir(redis_path) + assert not Script('import redis').infer() + + +def test_django_exists_only_as_stub(Script): + try: + import django + except ImportError: + pass + else: + pytest.skip('django is already installed, it should only exist as a stub for this test') + assert not Script('import django').infer() diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_implicit_namespace_package.py 
b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_implicit_namespace_package.py new file mode 100644 index 000000000..4fbbfccf9 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_implicit_namespace_package.py @@ -0,0 +1,100 @@ +from test.helpers import get_example_dir, example_dir +from jedi import Project + + +def test_implicit_namespace_package(Script): + sys_path = [get_example_dir('implicit_namespace_package', 'ns1'), + get_example_dir('implicit_namespace_package', 'ns2')] + project = Project('.', sys_path=sys_path) + + def script_with_path(*args, **kwargs): + return Script(project=project, *args, **kwargs) + + # goto definition + assert script_with_path('from pkg import ns1_file').infer() + assert script_with_path('from pkg import ns2_file').infer() + assert not script_with_path('from pkg import ns3_file').infer() + + # goto assignment + tests = { + 'from pkg.ns2_file import foo': 'ns2_file!', + 'from pkg.ns1_file import foo': 'ns1_file!', + } + for source, solution in tests.items(): + ass = script_with_path(source).goto() + assert len(ass) == 1 + assert ass[0].description == "foo = '%s'" % solution + + # completion + completions = script_with_path('from pkg import ').complete() + names = [c.name for c in completions] + compare = ['ns1_file', 'ns2_file'] + # must at least contain these items, other items are not important + assert set(compare) == set(names) + + tests = { + 'from pkg import ns2_file as x': 'ns2_file!', + 'from pkg import ns1_file as x': 'ns1_file!' 
+ } + for source, solution in tests.items(): + for c in script_with_path(source + '; x.').complete(): + if c.name == 'foo': + completion = c + solution = "foo = '%s'" % solution + assert completion.description == solution + + +def test_implicit_nested_namespace_package(Script): + code = 'from implicit_nested_namespaces.namespace.pkg.module import CONST' + + project = Project('.', sys_path=[example_dir]) + script = Script(code, project=project) + + result = script.infer(line=1, column=61) + + assert len(result) == 1 + + implicit_pkg, = Script(code, project=project).infer(column=10) + assert implicit_pkg.type == 'namespace' + assert implicit_pkg.module_path is None + + +def test_implicit_namespace_package_import_autocomplete(Script): + code = 'from implicit_name' + + project = Project('.', sys_path=[example_dir]) + script = Script(code, project=project) + compl = script.complete() + assert [c.name for c in compl] == ['implicit_namespace_package'] + + +def test_namespace_package_in_multiple_directories_autocompletion(Script): + code = 'from pkg.' 
+ sys_path = [get_example_dir('implicit_namespace_package', 'ns1'), + get_example_dir('implicit_namespace_package', 'ns2')] + + project = Project('.', sys_path=sys_path) + script = Script(code, project=project) + compl = script.complete() + assert set(c.name for c in compl) == set(['ns1_file', 'ns2_file']) + + +def test_namespace_package_in_multiple_directories_goto_definition(Script): + code = 'from pkg import ns1_file' + sys_path = [get_example_dir('implicit_namespace_package', 'ns1'), + get_example_dir('implicit_namespace_package', 'ns2')] + project = Project('.', sys_path=sys_path) + script = Script(code, project=project) + result = script.infer() + assert len(result) == 1 + + +def test_namespace_name_autocompletion_full_name(Script): + code = 'from pk' + sys_path = [get_example_dir('implicit_namespace_package', 'ns1'), + get_example_dir('implicit_namespace_package', 'ns2')] + + project = Project('.', sys_path=sys_path) + script = Script(code, project=project) + compl = script.complete() + assert set(c.full_name for c in compl) == set(['pkg']) diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_imports.py b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_imports.py new file mode 100644 index 000000000..51e654741 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_imports.py @@ -0,0 +1,511 @@ +""" +Tests of various import related things that could not be tested with "Black Box +Tests". 
+""" + +import os +from pathlib import Path + +import pytest + +import jedi +from jedi.file_io import FileIO +from jedi.inference import compiled +from jedi.inference import imports +from jedi.api.project import Project +from jedi.inference.gradual.conversion import _stub_to_python_value_set +from jedi.inference.references import get_module_contexts_containing_name +from ..helpers import get_example_dir, test_dir, test_dir_project, root_dir +from jedi.inference.compiled.subprocess.functions import _find_module_py33, _find_module + +THIS_DIR = os.path.dirname(__file__) + + +def test_find_module_basic(): + """Needs to work like the old find_module.""" + assert _find_module_py33('_io') == (None, False) + with pytest.raises(ImportError): + assert _find_module_py33('_DOESNTEXIST_') == (None, None) + + +def test_find_module_package(): + file_io, is_package = _find_module('json') + assert file_io.path.parts[-2:] == ('json', '__init__.py') + assert is_package is True + + +def test_find_module_not_package(): + file_io, is_package = _find_module('io') + assert file_io.path.name == 'io.py' + assert is_package is False + + +pkg_zip_path = Path(get_example_dir('zipped_imports', 'pkg.zip')) + + +def test_find_module_package_zipped(Script, inference_state, environment): + sys_path = environment.get_sys_path() + [str(pkg_zip_path)] + + project = Project('.', sys_path=sys_path) + script = Script('import pkg; pkg.mod', project=project) + assert len(script.complete()) == 1 + + file_io, is_package = inference_state.compiled_subprocess.get_module_info( + sys_path=sys_path, + string='pkg', + full_name='pkg' + ) + assert file_io is not None + assert file_io.path.parts[-3:] == ('pkg.zip', 'pkg', '__init__.py') + assert file_io._zip_path.name == 'pkg.zip' + assert is_package is True + + +@pytest.mark.parametrize( + 'code, file, package, path', [ + ('import pkg', '__init__.py', 'pkg', 'pkg'), + ('import pkg', '__init__.py', 'pkg', 'pkg'), + + ('from pkg import module', 'module.py', 'pkg', 
None), + ('from pkg.module', 'module.py', 'pkg', None), + + ('from pkg import nested', os.path.join('nested', '__init__.py'), + 'pkg.nested', os.path.join('pkg', 'nested')), + ('from pkg.nested', os.path.join('nested', '__init__.py'), + 'pkg.nested', os.path.join('pkg', 'nested')), + + ('from pkg.nested import nested_module', + os.path.join('nested', 'nested_module.py'), 'pkg.nested', None), + ('from pkg.nested.nested_module', + os.path.join('nested', 'nested_module.py'), 'pkg.nested', None), + + ('from pkg.namespace import namespace_module', + os.path.join('namespace', 'namespace_module.py'), 'pkg.namespace', None), + ('from pkg.namespace.namespace_module', + os.path.join('namespace', 'namespace_module.py'), 'pkg.namespace', None), + ] + +) +def test_correct_zip_package_behavior(Script, inference_state, environment, code, + file, package, path): + sys_path = environment.get_sys_path() + [str(pkg_zip_path)] + pkg, = Script(code, project=Project('.', sys_path=sys_path)).infer() + value, = pkg._name.infer() + assert value.py__file__() == pkg_zip_path.joinpath('pkg', file) + assert '.'.join(value.py__package__()) == package + assert value.is_package() is (path is not None) + if path is not None: + assert value.py__path__() == [str(pkg_zip_path.joinpath(path))] + + value.string_names = None + assert value.py__package__() == [] + + +@pytest.mark.parametrize("code,names", [ + ("from pkg.", {"module", "nested", "namespace"}), + ("from pkg.nested.", {"nested_module"}) +]) +def test_zip_package_import_complete(Script, environment, code, names): + sys_path = environment.get_sys_path() + [str(pkg_zip_path)] + completions = Script(code, project=Project('.', sys_path=sys_path)).complete() + assert names == {c.name for c in completions} + + +def test_find_module_not_package_zipped(Script, inference_state, environment): + path = get_example_dir('zipped_imports', 'not_pkg.zip') + sys_path = environment.get_sys_path() + [path] + script = Script('import not_pkg; not_pkg.val', 
project=Project('.', sys_path=sys_path)) + assert len(script.complete()) == 1 + + file_io, is_package = inference_state.compiled_subprocess.get_module_info( + sys_path=map(str, sys_path), + string='not_pkg', + full_name='not_pkg' + ) + assert file_io.path.parts[-2:] == ('not_pkg.zip', 'not_pkg.py') + assert is_package is False + + +def test_import_not_in_sys_path(Script, environment): + """ + non-direct imports (not in sys.path) + + This is in the end just a fallback. + """ + path = get_example_dir() + module_path = os.path.join(path, 'not_in_sys_path', 'pkg', 'module.py') + # This project tests the smart path option of Project. The sys_path is + # explicitly given to make sure that the path is just dumb and only + # includes non-folder dependencies. + project = Project(path, sys_path=environment.get_sys_path()) + a = Script(path=module_path, project=project).infer(line=5) + assert a[0].name == 'int' + + a = Script(path=module_path, project=project).infer(line=6) + assert a[0].name == 'str' + a = Script(path=module_path, project=project).infer(line=7) + assert a[0].name == 'str' + + +@pytest.mark.parametrize("code,name", [ + ("from flask.ext import foo; foo.", "Foo"), # flask_foo.py + ("from flask.ext import bar; bar.", "Bar"), # flaskext/bar.py + ("from flask.ext import baz; baz.", "Baz"), # flask_baz/__init__.py + ("from flask.ext import moo; moo.", "Moo"), # flaskext/moo/__init__.py + ("from flask.ext.", "foo"), + ("from flask.ext.", "bar"), + ("from flask.ext.", "baz"), + ("from flask.ext.", "moo"), + pytest.param("import flask.ext.foo; flask.ext.foo.", "Foo", marks=pytest.mark.xfail), + pytest.param("import flask.ext.bar; flask.ext.bar.", "Foo", marks=pytest.mark.xfail), + pytest.param("import flask.ext.baz; flask.ext.baz.", "Foo", marks=pytest.mark.xfail), + pytest.param("import flask.ext.moo; flask.ext.moo.", "Foo", marks=pytest.mark.xfail), +]) +def test_flask_ext(Script, code, name): + """flask.ext.foo is really imported from flaskext.foo or flask_foo. 
+ """ + path = get_example_dir('flask-site-packages') + completions = Script(code, project=Project('.', sys_path=[path])).complete() + assert name in [c.name for c in completions] + + +def test_not_importable_file(Script): + src = 'import not_importable_file as x; x.' + assert not Script(src, path='example.py', project=test_dir_project).complete() + + +def test_import_unique(Script): + src = "import os; os.path" + defs = Script(src, path='example.py').infer() + parent_contexts = [d._name._value for d in defs] + assert len(parent_contexts) == len(set(parent_contexts)) + + +def test_cache_works_with_sys_path_param(Script, tmpdir): + foo_path = tmpdir.join('foo') + bar_path = tmpdir.join('bar') + foo_path.join('module.py').write('foo = 123', ensure=True) + bar_path.join('module.py').write('bar = 123', ensure=True) + foo_completions = Script( + 'import module; module.', + project=Project('.', sys_path=[foo_path.strpath]), + ).complete() + bar_completions = Script( + 'import module; module.', + project=Project('.', sys_path=[bar_path.strpath]), + ).complete() + assert 'foo' in [c.name for c in foo_completions] + assert 'bar' not in [c.name for c in foo_completions] + + assert 'bar' in [c.name for c in bar_completions] + assert 'foo' not in [c.name for c in bar_completions] + + +def test_import_completion_docstring(Script): + import abc + s = Script('"""test"""\nimport ab') + abc_completions = [c for c in s.complete() if c.name == 'abc'] + assert len(abc_completions) == 1 + assert abc_completions[0].docstring(fast=False) == abc.__doc__ + + # However for performance reasons not all modules are loaded and the + # docstring is empty in this case. 
+ assert abc_completions[0].docstring() == '' + + +def test_goto_definition_on_import(Script): + assert Script("import sys_blabla").infer(1, 8) == [] + assert len(Script("import sys").infer(1, 8)) == 1 + + +def test_complete_on_empty_import(ScriptWithProject): + path = os.path.join(test_dir, 'whatever.py') + assert ScriptWithProject("from datetime import").complete()[0].name == 'import' + # should just list the files in the directory + assert 10 < len(ScriptWithProject("from .", path=path).complete()) < 30 + + # Global import + assert len(ScriptWithProject("from . import", path=path).complete(1, 5)) > 30 + # relative import + assert 10 < len(ScriptWithProject("from . import", path=path).complete(1, 6)) < 30 + + # Global import + assert len(ScriptWithProject("from . import classes", path=path).complete(1, 5)) > 30 + # relative import + assert 10 < len(ScriptWithProject("from . import classes", path=path).complete(1, 6)) < 30 + + wanted = {'ImportError', 'import', 'ImportWarning'} + assert {c.name for c in ScriptWithProject("import").complete()} == wanted + assert len(ScriptWithProject("import import", path=path).complete()) > 0 + + # 111 + assert ScriptWithProject("from datetime import").complete()[0].name == 'import' + assert ScriptWithProject("from datetime import ").complete() + + +def test_imports_on_global_namespace_without_path(Script): + """If the path is None, there shouldn't be any import problem""" + completions = Script("import operator").complete() + assert [c.name for c in completions] == ['operator'] + completions = Script("import operator", path='example.py').complete() + assert [c.name for c in completions] == ['operator'] + + # the first one has a path the second doesn't + completions = Script("import keyword", path='example.py').complete() + assert [c.name for c in completions] == ['keyword'] + completions = Script("import keyword").complete() + assert [c.name for c in completions] == ['keyword'] + + +def test_named_import(Script): + """named 
import - jedi-vim issue #8""" + s = "import time as dt" + assert len(Script(s, path='/').infer(1, 15)) == 1 + assert len(Script(s, path='/').infer(1, 10)) == 1 + + +def test_nested_import(Script): + s = "import multiprocessing.dummy; multiprocessing.dummy" + g = Script(s).goto() + assert len(g) == 1 + assert (g[0].line, g[0].column) != (0, 0) + + +def test_goto(Script): + sys, = Script("import sys").goto(follow_imports=True) + assert sys.type == 'module' + + +def test_os_after_from(Script): + def check(source, result, column=None): + completions = Script(source).complete(column=column) + assert [c.name for c in completions] == result + + check('\nfrom os. ', ['path']) + check('\nfrom os ', ['import']) + check('from os ', ['import']) + check('\nfrom os import whatever', ['import'], len('from os im')) + + check('from os\\\n', ['import']) + check('from os \\\n', ['import']) + + +def test_os_issues(Script): + def import_names(*args, **kwargs): + return [d.name for d in Script(*args).complete(**kwargs)] + + # Github issue #759 + s = 'import os, s' + assert 'sys' in import_names(s) + assert 'path' not in import_names(s, column=len(s) - 1) + assert 'os' in import_names(s, column=len(s) - 3) + + # Some more checks + s = 'from os import path, e' + assert 'environ' in import_names(s) + assert 'json' not in import_names(s, column=len(s) - 1) + assert 'environ' in import_names(s, column=len(s) - 1) + assert 'path' in import_names(s, column=len(s) - 3) + + +def test_path_issues(Script): + """ + See pull request #684 for details. + """ + source = '''from datetime import ''' + assert Script(source).complete() + + +def test_compiled_import_none(monkeypatch, Script): + """ + Related to #1079. An import might somehow fail and return None. 
+ """ + script = Script('import sys') + monkeypatch.setattr(compiled, 'load_module', lambda *args, **kwargs: None) + def_, = script.infer() + assert def_.type == 'module' + value, = def_._name.infer() + assert not _stub_to_python_value_set(value) + + +@pytest.mark.parametrize( + ('path', 'is_package', 'goal'), [ + # Both of these tests used to return relative paths to the module + # context that was initially given, but now we just work with the file + # system. + (os.path.join(THIS_DIR, 'test_docstring.py'), False, + ('test_inference', 'test_imports')), + (os.path.join(THIS_DIR, '__init__.py'), True, + ('test_inference', 'test_imports')), + ] +) +def test_get_modules_containing_name(inference_state, path, goal, is_package): + inference_state.project = Project(test_dir) + module = imports._load_python_module( + inference_state, + FileIO(path), + import_names=('ok', 'lala', 'x'), + is_package=is_package, + ) + assert module + module_context = module.as_context() + input_module, found_module = get_module_contexts_containing_name( + inference_state, + [module_context], + 'string_that_only_exists_here' + ) + assert input_module is module_context + assert found_module.string_names == goal + + +@pytest.mark.parametrize( + 'path', ('api/whatever/test_this.py', 'api/whatever/file')) +@pytest.mark.parametrize('empty_sys_path', (False, True)) +def test_relative_imports_with_multiple_similar_directories(Script, path, empty_sys_path): + dir = get_example_dir('issue1209') + if empty_sys_path: + project = Project(dir, sys_path=(), smart_sys_path=False) + else: + project = Project(dir) + script = Script( + "from . 
", + path=os.path.join(dir, path), + project=project, + ) + name, import_ = script.complete() + assert import_.name == 'import' + assert name.name == 'api_test1' + + +def test_relative_imports_with_outside_paths(Script): + dir = get_example_dir('issue1209') + project = Project(dir, sys_path=[], smart_sys_path=False) + script = Script( + "from ...", + path=os.path.join(dir, 'api/whatever/test_this.py'), + project=project, + ) + assert [c.name for c in script.complete()] == ['api', 'whatever'] + + script = Script( + "from " + '.' * 100, + path=os.path.join(dir, 'api/whatever/test_this.py'), + project=project, + ) + assert not script.complete() + + +def test_relative_imports_without_path(Script): + path = get_example_dir('issue1209', 'api', 'whatever') + project = Project(path, sys_path=[], smart_sys_path=False) + script = Script("from . ", project=project) + assert [c.name for c in script.complete()] == ['api_test1', 'import'] + + script = Script("from .. ", project=project) + assert [c.name for c in script.complete()] == ['import', 'whatever'] + + script = Script("from ... ", project=project) + assert [c.name for c in script.complete()] == ['api', 'import', 'whatever'] + + +def test_relative_import_out_of_file_system(Script): + code = "from " + '.' * 100 + assert not Script(code).complete() + script = Script(code + ' ') + import_, = script.complete() + assert import_.name == 'import' + + script = Script("from " + '.' 
* 100 + 'abc import ABCMeta') + assert not script.infer() + assert not script.complete() + + +@pytest.mark.parametrize( + 'level, directory, project_path, result', [ + (1, '/a/b/c', '/a', (['b', 'c'], '/a')), + (2, '/a/b/c', '/a', (['b'], '/a')), + (3, '/a/b/c', '/a', ([], '/a')), + (4, '/a/b/c', '/a', (None, '/')), + (5, '/a/b/c', '/a', (None, None)), + (1, '/', '/', ([], '/')), + (2, '/', '/', (None, None)), + (1, '/a/b', '/a/b/c', (None, '/a/b')), + (2, '/a/b', '/a/b/c', (None, '/a')), + (3, '/a/b', '/a/b/c', (None, '/')), + ] +) +def test_level_to_import_path(level, directory, project_path, result): + assert imports._level_to_base_import_path(project_path, directory, level) == result + + +def test_import_name_calculation(Script): + s = Script(path=os.path.join(test_dir, 'completion', 'isinstance.py')) + m = s._get_module_context() + assert m.string_names == ('test', 'completion', 'isinstance') + + +@pytest.mark.parametrize('name', ('builtins', 'typing')) +def test_pre_defined_imports_module(Script, environment, name): + path = os.path.join(root_dir, name + '.py') + module = Script('', path=path)._get_module_context() + assert module.string_names == (name,) + + assert str(module.inference_state.builtins_module.py__file__()) != path + assert str(module.inference_state.typing_module.py__file__()) != path + + +@pytest.mark.parametrize('name', ('builtins', 'typing')) +def test_import_needed_modules_by_jedi(Script, environment, tmpdir, name): + module_path = tmpdir.join(name + '.py') + module_path.write('int = ...') + script = Script( + 'import ' + name, + path=tmpdir.join('something.py').strpath, + project=Project('.', sys_path=[tmpdir.strpath] + environment.get_sys_path()), + ) + module, = script.infer() + assert str(module._inference_state.builtins_module.py__file__()) != module_path + assert str(module._inference_state.typing_module.py__file__()) != module_path + + +def test_import_with_semicolon(Script): + names = [c.name for c in Script('xzy; from abc import 
').complete()] + assert 'ABCMeta' in names + assert 'abc' not in names + + +def test_relative_import_star(Script): + # Coming from github #1235 + source = """ + from . import * + furl.c + """ + script = Script(source, path='export.py') + + assert script.complete(3, len("furl.c")) + + +@pytest.mark.parametrize('with_init', [False, True]) +def test_relative_imports_without_path_and_setup_py( + Script, inference_state, environment, tmpdir, with_init): + # Contrary to other tests here we create a temporary folder that is not + # part of a folder with a setup.py that signifies + tmpdir.join('file1.py').write('do_foo = 1') + other_path = tmpdir.join('other_files') + other_path.join('file2.py').write('def do_nothing():\n pass', ensure=True) + if with_init: + other_path.join('__init__.py').write('') + + for name, code in [('file2', 'from . import file2'), + ('file1', 'from .. import file1')]: + for func in (jedi.Script.goto, jedi.Script.infer): + n, = func(Script(code, path=other_path.join('test1.py').strpath)) + assert n.name == name + assert n.type == 'module' + assert n.line == 1 + + +def test_import_recursion(Script): + path = get_example_dir('import-recursion', "cq_example.py") + for c in Script(path=path).complete(3, 3): + c.docstring() diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_literals.py b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_literals.py new file mode 100644 index 000000000..f63b616f9 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_literals.py @@ -0,0 +1,42 @@ +import pytest +from jedi.inference.value import TreeInstance + + +def _infer_literal(Script, code, is_fstring=False): + def_, = Script(code).infer() + if is_fstring: + assert def_.name == 'str' + assert isinstance(def_._name._value, TreeInstance) + return '' + else: + return def_._name._value.get_safe_value() + + +def test_f_strings(Script, environment): + """ + f literals are not really supported in Jedi. 
They just get ignored and an + empty string is returned. + """ + if environment.version_info < (3, 6): + pytest.skip() + + assert _infer_literal(Script, 'f"asdf"', is_fstring=True) == '' + assert _infer_literal(Script, 'f"{asdf} "', is_fstring=True) == '' + assert _infer_literal(Script, 'F"{asdf} "', is_fstring=True) == '' + assert _infer_literal(Script, 'rF"{asdf} "', is_fstring=True) == '' + + +def test_rb_strings(Script, environment): + assert _infer_literal(Script, 'x = br"asdf"; x') == b'asdf' + assert _infer_literal(Script, 'x = rb"asdf"; x') == b'asdf' + + +def test_thousand_separators(Script, environment): + if environment.version_info < (3, 6): + pytest.skip() + + assert _infer_literal(Script, '1_2_3') == 123 + assert _infer_literal(Script, '123_456_789') == 123456789 + assert _infer_literal(Script, '0x3_4') == 52 + assert _infer_literal(Script, '0b1_0') == 2 + assert _infer_literal(Script, '0o1_0') == 8 diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_mixed.py b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_mixed.py new file mode 100644 index 000000000..b4e6c65d6 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_mixed.py @@ -0,0 +1,114 @@ +from typing import Generic, TypeVar, List + +import pytest + +import jedi +from jedi.inference.value import ModuleValue + + +def interpreter(code, namespace, *args, **kwargs): + return jedi.Interpreter(code, [namespace], *args, **kwargs) + + +def test_on_code(): + from functools import wraps + i = interpreter("wraps.__code__", {'wraps': wraps}) + assert i.infer() + + +def test_generics_without_definition(): + # Used to raise a recursion error + T = TypeVar('T') + + class Stack(Generic[T]): + def __init__(self): + self.items = [] # type: List[T] + + def push(self, item): + self.items.append(item) + + def pop(self): + # type: () -> T + return self.items.pop() + + class StackWrapper(): + def __init__(self): + self.stack = Stack() + self.stack.push(1) + + s = StackWrapper() 
+ assert not interpreter('s.stack.pop().', locals()).complete() + + +@pytest.mark.parametrize( + 'code, expected', [ + ('Foo().method()', 'str'), + ('Foo.method()', 'str'), + ('foo.method()', 'str'), + ('Foo().read()', 'str'), + ('Foo.read()', 'str'), + ('foo.read()', 'str'), + ] +) +def test_generics_methods(code, expected, class_findable): + T = TypeVar("T") + + class Reader(Generic[T]): + @classmethod + def read(cls) -> T: + return cls() + + def method(self) -> T: + return 1 + + class Foo(Reader[str]): + def transform(self) -> int: + return 42 + + foo = Foo() + + defs = jedi.Interpreter(code, [locals()]).infer() + if class_findable: + def_, = defs + assert def_.name == expected + else: + assert not defs + + +def test_mixed_module_cache(): + """Caused by #1479""" + interpreter = jedi.Interpreter('jedi', [{'jedi': jedi}]) + d, = interpreter.infer() + assert d.name == 'jedi' + inference_state = interpreter._inference_state + jedi_module, = inference_state.module_cache.get(('jedi',)) + assert isinstance(jedi_module, ModuleValue) + + +def test_signature(): + """ + For performance reasons we use the signature of the compiled object and not + the tree object. 
+ """ + def some_signature(foo): + pass + + from inspect import Signature, Parameter + some_signature.__signature__ = Signature([ + Parameter('bar', kind=Parameter.KEYWORD_ONLY, default=1) + ]) + + s, = jedi.Interpreter('some_signature', [locals()]).goto() + assert s.docstring() == 'some_signature(*, bar=1)' + + +def test_compiled_signature_annotation_string(): + import typing + + def func(x: typing.Type, y: typing.Union[typing.Type, int]): + pass + func.__name__ = 'not_func' + + s, = jedi.Interpreter('func()', [locals()]).get_signatures(1, 5) + assert s.params[0].description == 'param x: Type' + assert s.params[1].description == 'param y: Union[Type, int]' diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_namespace_package.py b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_namespace_package.py new file mode 100644 index 000000000..69e91354c --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_namespace_package.py @@ -0,0 +1,92 @@ +from os.path import join + +import pytest +import py + +from ..helpers import get_example_dir, example_dir +from jedi import Project + + +SYS_PATH = [get_example_dir('namespace_package', 'ns1'), + get_example_dir('namespace_package', 'ns2')] + + +def script_with_path(Script, *args, **kwargs): + return Script(project=Project('.', sys_path=SYS_PATH), *args, **kwargs) + + +def test_goto_definition(Script): + assert script_with_path(Script, 'from pkg import ns1_file').infer() + assert script_with_path(Script, 'from pkg import ns2_file').infer() + assert not script_with_path(Script, 'from pkg import ns3_file').infer() + + +@pytest.mark.parametrize( + ('source', 'solution'), [ + ('from pkg.ns2_folder.nested import foo', 'nested!'), + ('from pkg.ns2_folder import foo', 'ns2_folder!'), + ('from pkg.ns2_file import foo', 'ns2_file!'), + ('from pkg.ns1_folder import foo', 'ns1_folder!'), + ('from pkg.ns1_file import foo', 'ns1_file!'), + ('from pkg import foo', 'ns1!'), + ] +) +def 
test_goto_assignment(Script, source, solution): + ass = script_with_path(Script, source).goto() + assert len(ass) == 1 + assert ass[0].description == "foo = '%s'" % solution + + +def test_simple_completions(Script): + # completion + completions = script_with_path(Script, 'from pkg import ').complete() + names = [c.name for c in completions] + compare = ['foo', 'ns1_file', 'ns1_folder', 'ns2_folder', 'ns2_file', + 'pkg_resources', 'pkgutil', '__name__', '__path__', + '__package__', '__file__', '__doc__'] + # must at least contain these items, other items are not important + assert set(compare) == set(names) + + +@pytest.mark.parametrize( + ('source', 'solution'), [ + ('from pkg import ns2_folder as x', 'ns2_folder!'), + ('from pkg import ns2_file as x', 'ns2_file!'), + ('from pkg.ns2_folder import nested as x', 'nested!'), + ('from pkg import ns1_folder as x', 'ns1_folder!'), + ('from pkg import ns1_file as x', 'ns1_file!'), + ('import pkg as x', 'ns1!'), + ] +) +def test_completions(Script, source, solution): + for c in script_with_path(Script, source + '; x.').complete(): + if c.name == 'foo': + completion = c + solution = "foo = '%s'" % solution + assert completion.description == solution + + +def test_nested_namespace_package(Script): + code = 'from nested_namespaces.namespace.pkg import CONST' + + sys_path = [example_dir] + project = Project('.', sys_path=sys_path) + result = Script(code, project=project).infer(line=1, column=45) + + assert len(result) == 1 + + +def test_relative_import(Script, environment, tmpdir): + """ + Attempt a relative import in a very simple namespace package. + """ + directory = get_example_dir('namespace_package_relative_import') + # Need to copy the content in a directory where there's no __init__.py. 
+ py.path.local(directory).copy(tmpdir) + file_path = join(tmpdir.strpath, "rel1.py") + script = Script(path=file_path) + d, = script.infer(line=1) + assert d.name == 'int' + d, = script.goto(line=1) + assert d.name == 'name' + assert d.module_name == 'rel2' diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_precedence.py b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_precedence.py new file mode 100644 index 000000000..f92e2af5d --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_precedence.py @@ -0,0 +1,16 @@ +from jedi.inference.compiled import CompiledValue + +import pytest + + +@pytest.mark.parametrize('source', [ + pytest.param('1 == 1'), + pytest.param('1.0 == 1'), + # Unfortunately for now not possible, because it's a typeshed object. + pytest.param('... == ...', marks=pytest.mark.xfail), +]) +def test_equals(Script, environment, source): + script = Script(source) + node = script._module_node.children[0] + first, = script._get_module_context().infer_node(node) + assert isinstance(first, CompiledValue) and first.get_safe_value() is True diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_pyc.py b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_pyc.py new file mode 100644 index 000000000..f5ef8b29b --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_pyc.py @@ -0,0 +1,78 @@ +""" +Test completions from *.pyc files: + + - generate a dummy python module + - compile the dummy module to generate a *.pyc + - delete the pure python dummy module + - try jedi on the generated *.pyc +""" +import os +import shutil +import sys + +import pytest + +import jedi +from jedi.api.environment import SameEnvironment, InterpreterEnvironment + + +SRC = """class Foo: + pass + +class Bar: + pass +""" + + +@pytest.fixture +def pyc_project_path(tmpdir): + path = tmpdir.strpath + dummy_package_path = os.path.join(path, "dummy_package") + os.mkdir(dummy_package_path) + with 
open(os.path.join(dummy_package_path, "__init__.py"), 'w', newline=''): + pass + + dummy_path = os.path.join(dummy_package_path, 'dummy.py') + with open(dummy_path, 'w', newline='') as f: + f.write(SRC) + import compileall + compileall.compile_file(dummy_path) + os.remove(dummy_path) + + # To import pyc modules, we must move them out of the __pycache__ + # directory and rename them to remove ".cpython-%s%d" + # see: http://stackoverflow.com/questions/11648440/python-does-not-detect-pyc-files + pycache = os.path.join(dummy_package_path, "__pycache__") + for f in os.listdir(pycache): + dst = f.replace('.cpython-%s%s' % sys.version_info[:2], "") + dst = os.path.join(dummy_package_path, dst) + shutil.copy(os.path.join(pycache, f), dst) + try: + yield path + finally: + shutil.rmtree(path) + + +@pytest.mark.parametrize('load_unsafe_extensions', [False, True]) +def test_pyc(pyc_project_path, environment, load_unsafe_extensions): + """ + The list of completion must be greater than 2. + """ + path = os.path.join(pyc_project_path, 'blub.py') + if not isinstance(environment, InterpreterEnvironment): + # We are using the same version for pyc completions here, because it + # was compiled in that version. However with interpreter environments + # we also have the same version and it's easier to debug. 
+ environment = SameEnvironment() + environment = environment + project = jedi.Project(pyc_project_path, load_unsafe_extensions=load_unsafe_extensions) + s = jedi.Script( + "from dummy_package import dummy; dummy.", + path=path, + environment=environment, + project=project, + ) + if load_unsafe_extensions: + assert len(s.complete()) >= 2 + else: + assert not s.complete() diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_representation.py b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_representation.py new file mode 100644 index 000000000..fff323f79 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_representation.py @@ -0,0 +1,34 @@ +from textwrap import dedent + + +def get_definition_and_inference_state(Script, source): + first, = Script(dedent(source)).infer() + return first._name._value, first._inference_state + + +def test_function_execution(Script): + """ + We've been having an issue of a mutable list that was changed inside the + function execution. Test if an execution always returns the same result. + """ + + s = """ + def x(): + return str() + x""" + func, inference_state = get_definition_and_inference_state(Script, s) + # Now just use the internals of the result (easiest way to get a fully + # usable function). + # Should return the same result both times. 
+ assert len(func.execute_with_values()) == 1 + assert len(func.execute_with_values()) == 1 + + +def test_class_mro(Script): + s = """ + class X(object): + pass + X""" + cls, inference_state = get_definition_and_inference_state(Script, s) + mro = cls.py__mro__() + assert [c.name.string_name for c in mro] == ['X', 'object'] diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_signature.py b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_signature.py new file mode 100644 index 000000000..9ee8a55c9 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_signature.py @@ -0,0 +1,408 @@ +from textwrap import dedent +from operator import ge, lt +import re +import os + +import pytest + +from jedi.inference.gradual.conversion import _stub_to_python_value_set +from ..helpers import get_example_dir + + +@pytest.mark.parametrize( + 'code, sig, names, op, version', [ + ('import math; math.cos', 'cos(x, /)', ['x'], ge, (3, 6)), + + ('next', 'next(iterator, default=None, /)', ['iterator', 'default'], ge, (3, 6)), + + ('str', "str(object='', /) -> str", ['object'], ge, (3, 6)), + + ('pow', 'pow(x, y, z=None, /) -> number', ['x', 'y', 'z'], lt, (3, 6)), + ('pow', 'pow(base, exp, mod=None)', ['base', 'exp', 'mod'], ge, (3, 8)), + + ('bytes.partition', 'partition(self, sep, /) -> (head, sep, tail)', + ['self', 'sep'], lt, (3, 6)), + ('bytes.partition', 'partition(self, sep, /)', ['self', 'sep'], ge, (3, 6)), + + ('bytes().partition', 'partition(sep, /) -> (head, sep, tail)', ['sep'], lt, (3, 6)), + ('bytes().partition', 'partition(sep, /)', ['sep'], ge, (3, 6)), + ] +) +def test_compiled_signature(Script, environment, code, sig, names, op, version): + if not op(environment.version_info, version): + return # The test right next to it should take over. 
+ + d, = Script(code).infer() + value, = d._name.infer() + compiled, = _stub_to_python_value_set(value) + signature, = compiled.get_signatures() + assert signature.to_string() == sig + assert [n.string_name for n in signature.get_param_names()] == names + + +classmethod_code = ''' +class X: + @classmethod + def x(cls, a, b): + pass + + @staticmethod + def static(a, b): + pass +''' + + +partial_code = ''' +import functools + +def func(a, b, c): + pass + +a = functools.partial(func) +b = functools.partial(func, 1) +c = functools.partial(func, 1, c=2) +d = functools.partial() +''' + + +partialmethod_code = ''' +import functools + +class X: + def func(self, a, b, c): + pass + a = functools.partialmethod(func) + b = functools.partialmethod(func, 1) + c = functools.partialmethod(func, 1, c=2) + d = functools.partialmethod() +''' + + +@pytest.mark.parametrize( + 'code, expected', [ + ('def f(a, * args, x): pass\n f(', 'f(a, *args, x)'), + ('def f(a, *, x): pass\n f(', 'f(a, *, x)'), + ('def f(*, x= 3,**kwargs): pass\n f(', 'f(*, x=3, **kwargs)'), + ('def f(x,/,y,* ,z): pass\n f(', 'f(x, /, y, *, z)'), + ('def f(a, /, *, x=3, **kwargs): pass\n f(', 'f(a, /, *, x=3, **kwargs)'), + + (classmethod_code + 'X.x(', 'x(a, b)'), + (classmethod_code + 'X().x(', 'x(a, b)'), + (classmethod_code + 'X.static(', 'static(a, b)'), + (classmethod_code + 'X().static(', 'static(a, b)'), + + (partial_code + 'a(', 'func(a, b, c)'), + (partial_code + 'b(', 'func(b, c)'), + (partial_code + 'c(', 'func(b)'), + (partial_code + 'd(', None), + + (partialmethod_code + 'X().a(', 'func(a, b, c)'), + (partialmethod_code + 'X().b(', 'func(b, c)'), + (partialmethod_code + 'X().c(', 'func(b)'), + (partialmethod_code + 'X().d(', None), + (partialmethod_code + 'X.c(', 'func(a, b)'), + (partialmethod_code + 'X.d(', None), + + ('import contextlib\n@contextlib.contextmanager\ndef f(x): pass\nf(', 'f(x)'), + + # typing lib + ('from typing import cast\ncast(', { + 'cast(typ: object, val: Any) -> Any', + 
'cast(typ: str, val: Any) -> Any', + 'cast(typ: Type[_T], val: Any) -> _T'}), + ('from typing import TypeVar\nTypeVar(', + 'TypeVar(name: str, *constraints: Type[Any], bound: Union[None, Type[Any], str]=..., ' + 'covariant: bool=..., contravariant: bool=...)'), + ('from typing import List\nList(', None), + ('from typing import List\nList[int](', None), + ('from typing import Tuple\nTuple(', None), + ('from typing import Tuple\nTuple[int](', None), + ('from typing import Optional\nOptional(', None), + ('from typing import Optional\nOptional[int](', None), + ('from typing import Any\nAny(', None), + ('from typing import NewType\nNewType(', 'NewType(name: str, tp: Type[_T]) -> Type[_T]'), + ] +) +def test_tree_signature(Script, environment, code, expected): + # Only test this in the latest version, because of / + if environment.version_info < (3, 8): + pytest.skip() + + if expected is None: + assert not Script(code).get_signatures() + else: + actual = {sig.to_string() for sig in Script(code).get_signatures()} + if not isinstance(expected, set): + expected = {expected} + assert expected == actual + + +@pytest.mark.parametrize( + 'combination, expected', [ + # Functions + ('full_redirect(simple)', 'b, *, c'), + ('full_redirect(simple4)', 'b, x: int'), + ('full_redirect(a)', 'b, *args'), + ('full_redirect(kw)', 'b, *, c, **kwargs'), + ('full_redirect(akw)', 'c, *args, **kwargs'), + + # Non functions + ('full_redirect(lambda x, y: ...)', 'y'), + ('full_redirect()', '*args, **kwargs'), + ('full_redirect(1)', '*args, **kwargs'), + + # Classes / inheritance + ('full_redirect(C)', 'z, *, c'), + ('full_redirect(C())', 'y'), + ('full_redirect(G)', 't: T'), + ('full_redirect(G[str])', '*args, **kwargs'), + ('D', 'D(a, z, /)'), + ('D()', 'D(x, y)'), + ('D().foo', 'foo(a, *, bar, z, **kwargs)'), + + # Merging + ('two_redirects(simple, simple)', 'a, b, *, c'), + ('two_redirects(simple2, simple2)', 'x'), + ('two_redirects(akw, kw)', 'a, c, *args, **kwargs'), + ('two_redirects(kw, 
akw)', 'a, b, *args, c, **kwargs'), + + ('two_kwargs_redirects(simple, simple)', '*args, a, b, c'), + ('two_kwargs_redirects(kw, kw)', '*args, a, b, c, **kwargs'), + ('two_kwargs_redirects(simple, kw)', '*args, a, b, c, **kwargs'), + ('two_kwargs_redirects(simple2, two_kwargs_redirects(simple, simple))', + '*args, x, a, b, c'), + + ('combined_redirect(simple, simple2)', 'a, b, /, *, x'), + ('combined_redirect(simple, simple3)', 'a, b, /, *, a, x: int'), + ('combined_redirect(simple2, simple)', 'x, /, *, a, b, c'), + ('combined_redirect(simple3, simple)', 'a, x: int, /, *, a, b, c'), + + ('combined_redirect(simple, kw)', 'a, b, /, *, a, b, c, **kwargs'), + ('combined_redirect(kw, simple)', 'a, b, /, *, a, b, c'), + ('combined_redirect(simple, simple2)', 'a, b, /, *, x'), + + ('combined_lot_of_args(kw, simple4)', '*, b'), + ('combined_lot_of_args(simple4, kw)', '*, b, c, **kwargs'), + + ('combined_redirect(combined_redirect(simple2, simple4), combined_redirect(kw, simple5))', + 'x, /, *, y'), + ('combined_redirect(combined_redirect(simple4, simple2), combined_redirect(simple5, kw))', + 'a, b, x: int, /, *, a, b, c, **kwargs'), + ('combined_redirect(combined_redirect(a, kw), combined_redirect(kw, simple5))', + 'a, b, /, *args, y'), + + ('no_redirect(kw)', '*args, **kwargs'), + ('no_redirect(akw)', '*args, **kwargs'), + ('no_redirect(simple)', '*args, **kwargs'), + ] +) +def test_nested_signatures(Script, environment, combination, expected): + code = dedent(''' + def simple(a, b, *, c): ... + def simple2(x): ... + def simple3(a, x: int): ... + def simple4(a, b, x: int): ... + def simple5(y): ... + def a(a, b, *args): ... + def kw(a, b, *, c, **kwargs): ... + def akw(a, c, *args, **kwargs): ... 
+ + def no_redirect(func): + return lambda *args, **kwargs: func(1) + def full_redirect(func): + return lambda *args, **kwargs: func(1, *args, **kwargs) + def two_redirects(func1, func2): + return lambda *args, **kwargs: func1(*args, **kwargs) + func2(1, *args, **kwargs) + def two_kwargs_redirects(func1, func2): + return lambda *args, **kwargs: func1(**kwargs) + func2(1, **kwargs) + def combined_redirect(func1, func2): + return lambda *args, **kwargs: func1(*args) + func2(**kwargs) + def combined_lot_of_args(func1, func2): + return lambda *args, **kwargs: func1(1, 2, 3, 4, *args) + func2(a=3, x=1, y=1, **kwargs) + + class C: + def __init__(self, a, z, *, c): ... + def __call__(self, x, y): ... + + def foo(self, bar, z, **kwargs): ... + + class D(C): + def __init__(self, *args): + super().__init__(*args) + + def foo(self, a, **kwargs): + super().foo(**kwargs) + + from typing import Generic, TypeVar + T = TypeVar('T') + class G(Generic[T]): + def __init__(self, i, t: T): ... + ''') + code += 'z = ' + combination + '\nz(' + sig, = Script(code).get_signatures() + computed = sig.to_string() + if not re.match(r'\w+\(', expected): + expected = '(' + expected + ')' + assert expected == computed + + +def test_pow_signature(Script, environment): + # See github #1357 + sigs = Script('pow(').get_signatures() + strings = {sig.to_string() for sig in sigs} + if environment.version_info < (3, 8): + assert strings == {'pow(base: _SupportsPow2[_E, _T_co], exp: _E, /) -> _T_co', + 'pow(base: _SupportsPow3[_E, _M, _T_co], exp: _E, mod: _M, /) -> _T_co', + 'pow(base: float, exp: float, mod: None=..., /) -> float', + 'pow(base: int, exp: int, mod: None=..., /) -> Any', + 'pow(base: int, exp: int, mod: int, /) -> int'} + else: + assert strings == {'pow(base: _SupportsPow2[_E, _T_co], exp: _E) -> _T_co', + 'pow(base: _SupportsPow3[_E, _M, _T_co], exp: _E, mod: _M) -> _T_co', + 'pow(base: float, exp: float, mod: None=...) -> float', + 'pow(base: int, exp: int, mod: None=...) 
-> Any', + 'pow(base: int, exp: int, mod: int) -> int'} + + +@pytest.mark.parametrize( + 'code, signature', [ + [dedent(''' + # identifier:A + import functools + def f(x): + pass + def x(f): + @functools.wraps(f) + def wrapper(*args): + return f(*args) + return wrapper + + x(f)('''), 'f(x, /)'], + [dedent(''' + # identifier:B + import functools + def f(x): + pass + def x(f): + @functools.wraps(f) + def wrapper(): + # Have no arguments here, but because of wraps, the signature + # should still be f's. + return 1 + return wrapper + + x(f)('''), 'f()'], + [dedent(''' + # identifier:C + import functools + def f(x: int, y: float): + pass + + @functools.wraps(f) + def wrapper(*args, **kwargs): + return f(*args, **kwargs) + + wrapper('''), 'f(x: int, y: float)'], + [dedent(''' + # identifier:D + def f(x: int, y: float): + pass + + def wrapper(*args, **kwargs): + return f(*args, **kwargs) + + wrapper('''), 'wrapper(x: int, y: float)'], + ] +) +def test_wraps_signature(Script, code, signature): + sigs = Script(code).get_signatures() + assert {sig.to_string() for sig in sigs} == {signature} + + +@pytest.mark.parametrize( + 'start, start_params', [ + ['@dataclass\nclass X:', []], + ['@dataclass(eq=True)\nclass X:', []], + [dedent(''' + class Y(): + y: int + @dataclass + class X(Y):'''), []], + [dedent(''' + @dataclass + class Y(): + y: int + z = 5 + @dataclass + class X(Y):'''), ['y']], + ] +) +def test_dataclass_signature(Script, skip_pre_python37, start, start_params): + code = dedent(''' + name: str + foo = 3 + price: float + quantity: int = 0.0 + + X(''') + + code = 'from dataclasses import dataclass\n' + start + code + + sig, = Script(code).get_signatures() + assert [p.name for p in sig.params] == start_params + ['name', 'price', 'quantity'] + quantity, = sig.params[-1].infer() + assert quantity.name == 'int' + price, = sig.params[-2].infer() + assert price.name == 'float' + + +@pytest.mark.parametrize( + 'stmt, expected', [ + ('args = 1', 'wrapped(*args, b, c)'), + 
('args = (1,)', 'wrapped(*args, c)'), + ('kwargs = 1', 'wrapped(b, /, **kwargs)'), + ('kwargs = dict(b=3)', 'wrapped(b, /, **kwargs)'), + ] +) +def test_param_resolving_to_static(Script, stmt, expected): + code = dedent('''\ + def full_redirect(func): + def wrapped(*args, **kwargs): + {stmt} + return func(1, *args, **kwargs) + return wrapped + def simple(a, b, *, c): ... + full_redirect(simple)('''.format(stmt=stmt)) + + sig, = Script(code).get_signatures() + assert sig.to_string() == expected + + +@pytest.mark.parametrize( + 'code', [ + 'from file import with_overload; with_overload(', + 'from file import *\nwith_overload(', + ] +) +def test_overload(Script, code): + dir_ = get_example_dir('typing_overload') + x1, x2 = Script(code, path=os.path.join(dir_, 'foo.py')).get_signatures() + assert x1.to_string() == 'with_overload(x: int, y: int) -> float' + assert x2.to_string() == 'with_overload(x: str, y: list) -> float' + + +def test_enum(Script): + script = Script('''\ + from enum import Enum + + class Planet(Enum): + MERCURY = (3.303e+23, 2.4397e6) + VENUS = (4.869e+24, 6.0518e6) + + def __init__(self, mass, radius): + self.mass = mass # in kilograms + self.radius = radius # in meters + + Planet.MERCURY''') + completion, = script.complete() + assert not completion.get_signatures() diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_stdlib.py b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_stdlib.py new file mode 100644 index 000000000..151fd8f66 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_stdlib.py @@ -0,0 +1,97 @@ +""" +Tests of various stdlib related things that could not be tested +with "Black Box Tests". 
+""" +from textwrap import dedent + +import pytest + + +@pytest.mark.parametrize(['letter', 'expected'], [ + ('n', ['name']), + ('s', ['smart']), +]) +def test_namedtuple_str(letter, expected, Script): + source = dedent("""\ + import collections + Person = collections.namedtuple('Person', 'name smart') + dave = Person('Dave', False) + dave.%s""") % letter + result = Script(source).complete() + completions = set(r.name for r in result) + assert completions == set(expected) + + +def test_namedtuple_list(Script): + source = dedent("""\ + import collections + Cat = collections.namedtuple('Person', ['legs', u'length', 'large']) + garfield = Cat(4, '85cm', True) + garfield.l""") + result = Script(source).complete() + completions = set(r.name for r in result) + assert completions == {'legs', 'length', 'large'} + + +def test_namedtuple_content(Script): + source = dedent("""\ + import collections + Foo = collections.namedtuple('Foo', ['bar', 'baz']) + named = Foo(baz=4, bar=3.0) + unnamed = Foo(4, '') + """) + + def d(source): + x, = Script(source).infer() + return x.name + + assert d(source + 'unnamed.bar') == 'int' + assert d(source + 'unnamed.baz') == 'str' + assert d(source + 'named.bar') == 'float' + assert d(source + 'named.baz') == 'int' + + +def test_nested_namedtuples(Script): + """ + From issue #730. 
+ """ + s = Script(dedent(''' + import collections + Dataset = collections.namedtuple('Dataset', ['data']) + Datasets = collections.namedtuple('Datasets', ['train']) + train_x = Datasets(train=Dataset('data_value')) + train_x.train.''')) + assert 'data' in [c.name for c in s.complete()] + + +def test_namedtuple_infer(Script): + source = dedent(""" + from collections import namedtuple + + Foo = namedtuple('Foo', 'id timestamp gps_timestamp attributes') + Foo""") + + from jedi.api import Script + + d1, = Script(source).infer() + + assert d1.get_line_code() == "class Foo(tuple):\n" + assert d1.module_path is None + assert d1.docstring() == 'Foo(id, timestamp, gps_timestamp, attributes)' + + +def test_re_sub(Script, environment): + """ + This whole test was taken out of completion/stdlib.py, because of the + version differences. + """ + def run(code): + defs = Script(code).infer() + return {d.name for d in defs} + + names = run("import re; re.sub('a', 'a', 'f')") + assert names == {'str'} + + # This param is missing because of overloading. + names = run("import re; re.sub('a', 'a')") + assert names == {'str', 'bytes'} diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_sys_path.py b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_sys_path.py new file mode 100644 index 000000000..2fa0e4df8 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_inference/test_sys_path.py @@ -0,0 +1,110 @@ +import os +from glob import glob +import sys +import shutil +from pathlib import Path + +import pytest + +from ..helpers import skip_if_windows, skip_if_not_windows, get_example_dir +from jedi.inference import sys_path +from jedi.api.environment import create_environment + + +def test_paths_from_assignment(Script): + def paths(src): + script = Script(src, path='/foo/bar.py') + expr_stmt = script._module_node.children[0] + return set(sys_path._paths_from_assignment(script._get_module_context(), expr_stmt)) + + # Normalize paths for Windows. 
+ path_a = Path('/foo/a').absolute() + path_b = Path('/foo/b').absolute() + path_c = Path('/foo/c').absolute() + + assert paths('sys.path[0:0] = ["a"]') == {path_a} + assert paths('sys.path = ["b", 1, x + 3, y, "c"]') == {path_b, path_c} + assert paths('sys.path = a = ["a"]') == {path_a} + + # Fail for complicated examples. + assert paths('sys.path, other = ["a"], 2') == set() + + +def test_venv_and_pths(venv_path): + pjoin = os.path.join + + site_pkg_path = pjoin(venv_path, 'lib') + if os.name == 'nt': + site_pkg_path = pjoin(site_pkg_path, 'site-packages') + else: + site_pkg_path = glob(pjoin(site_pkg_path, 'python*', 'site-packages'))[0] + shutil.rmtree(site_pkg_path) + shutil.copytree(get_example_dir('sample_venvs', 'pth_directory'), site_pkg_path) + + virtualenv = create_environment(venv_path) + venv_paths = virtualenv.get_sys_path() + + ETALON = [ + # For now disable egg-links. I have no idea how they work... ~ dave + #pjoin('/path', 'from', 'egg-link'), + #pjoin(site_pkg_path, '.', 'relative', 'egg-link', 'path'), + site_pkg_path, + pjoin(site_pkg_path, 'dir-from-foo-pth'), + '/foo/smth.py:module', + # Not sure why it's added twice. It has to do with site.py which is not + # something we can change. However this obviously also doesn't matter. + '/foo/smth.py:from_func', + '/foo/smth.py:from_func', + ] + + # Ensure that pth and egg-link paths were added. + assert venv_paths[-len(ETALON):] == ETALON + + # Ensure that none of venv dirs leaked to the interpreter. + assert not set(sys.path).intersection(ETALON) + + +_s = ['/a', '/b', '/c/d/'] + + +@pytest.mark.parametrize( + 'sys_path_, module_path, expected, is_package', [ + (_s, '/a/b', ('b',), False), + (_s, '/a/b/c', ('b', 'c'), False), + (_s, '/a/b.py', ('b',), False), + (_s, '/a/b/c.py', ('b', 'c'), False), + (_s, '/x/b.py', None, False), + (_s, '/c/d/x.py', ('x',), False), + (_s, '/c/d/x.py', ('x',), False), + (_s, '/c/d/x/y.py', ('x', 'y'), False), + # If dots are in there they also resolve. 
These are obviously illegal + # in Python, but Jedi can handle them. Give the user a bit more freedom + # that he will have to correct eventually. + (_s, '/a/b.c.py', ('b.c',), False), + (_s, '/a/b.d/foo.bar.py', ('b.d', 'foo.bar'), False), + + (_s, '/a/.py', None, False), + (_s, '/a/c/.py', None, False), + + (['/foo'], '/foo/bar/__init__.py', ('bar',), True), + (['/foo'], '/foo/bar/baz/__init__.py', ('bar', 'baz'), True), + + skip_if_windows(['/foo'], '/foo/bar.so', ('bar',), False), + skip_if_windows(['/foo'], '/foo/bar/__init__.so', ('bar',), True), + skip_if_not_windows(['/foo'], '/foo/bar.pyd', ('bar',), False), + skip_if_not_windows(['/foo'], '/foo/bar/__init__.pyd', ('bar',), True), + + (['/foo'], '/x/bar.py', None, False), + (['/foo'], '/foo/bar.xyz', ('bar.xyz',), False), + + (['/foo', '/foo/bar'], '/foo/bar/baz', ('baz',), False), + (['/foo/bar', '/foo'], '/foo/bar/baz', ('baz',), False), + + (['/'], '/bar/baz.py', ('bar', 'baz',), False), + ]) +def test_transform_path_to_dotted(sys_path_, module_path, expected, is_package): + # transform_path_to_dotted expects normalized absolute paths. + sys_path_ = [os.path.abspath(path) for path in sys_path_] + module_path = os.path.abspath(module_path) + assert sys_path.transform_path_to_dotted(sys_path_, Path(module_path)) \ + == (expected, is_package) diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_integration.py b/bundle/jedi-vim/pythonx/jedi/test/test_integration.py new file mode 100644 index 000000000..2e5703aaf --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_integration.py @@ -0,0 +1,76 @@ +import os + +import pytest + +from . import helpers +from jedi.common import indent_block +from jedi import RefactoringError + + +def assert_case_equal(case, actual, desired): + """ + Assert ``actual == desired`` with formatted message. 
+ + This is not needed for typical pytest use case, but as we need + ``--assert=plain`` (see ../pytest.ini) to workaround some issue + due to pytest magic, let's format the message by hand. + """ + assert actual == desired, """ +Test %r failed. +actual = +%s +desired = +%s +""" % (case, indent_block(str(actual)), indent_block(str(desired))) + + +def assert_static_analysis(case, actual, desired): + """A nicer formatting for static analysis tests.""" + a = set(actual) + d = set(desired) + assert actual == desired, """ +Test %r failed. +not raised = %s +unspecified = %s +""" % (case, sorted(d - a), sorted(a - d)) + + +def test_completion(case, monkeypatch, environment, has_django): + skip_reason = case.get_skip_reason(environment) + if skip_reason is not None: + pytest.skip(skip_reason) + + if (not has_django) and case.path.endswith('django.py'): + pytest.skip('Needs django to be installed to run this test.') + repo_root = helpers.root_dir + monkeypatch.chdir(os.path.join(repo_root, 'jedi')) + case.run(assert_case_equal, environment) + + +def test_static_analysis(static_analysis_case, environment): + skip_reason = static_analysis_case.get_skip_reason(environment) + if skip_reason is not None: + pytest.skip(skip_reason) + else: + static_analysis_case.run(assert_static_analysis, environment) + + +def test_refactor(refactor_case, environment): + """ + Run refactoring test case. 
+ + :type refactor_case: :class:`.refactor.RefactoringCase` + """ + desired_result = refactor_case.get_desired_result() + if refactor_case.type == 'error': + with pytest.raises(RefactoringError) as e: + refactor_case.refactor(environment) + assert e.value.args[0] == desired_result.strip() + elif refactor_case.type == 'text': + refactoring = refactor_case.refactor(environment) + assert not refactoring.get_renames() + text = ''.join(f.get_new_code() for f in refactoring.get_changed_files().values()) + assert_case_equal(refactor_case, text, desired_result) + else: + diff = refactor_case.refactor(environment).get_diff() + assert_case_equal(refactor_case, diff, desired_result) diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_parso_integration/__init__.py b/bundle/jedi-vim/pythonx/jedi/test/test_parso_integration/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_parso_integration/test_basic.py b/bundle/jedi-vim/pythonx/jedi/test/test_parso_integration/test_basic.py new file mode 100644 index 000000000..cf1743555 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_parso_integration/test_basic.py @@ -0,0 +1,92 @@ +from textwrap import dedent + +import pytest +from parso import parse + + +def test_form_feed_characters(Script): + s = "\f\nclass Test(object):\n pass" + Script(s).get_signatures(line=2, column=18) + + +def check_p(src): + module_node = parse(src) + assert src == module_node.get_code() + return module_node + + +def test_if(Script): + src = dedent('''\ + def func(): + x = 3 + if x: + def y(): + return x + return y() + + func() + ''') + + # Two parsers needed, one for pass and one for the function. 
+ check_p(src) + assert [d.name for d in Script(src).infer(8, 6)] == ['int'] + + +def test_class_and_if(Script): + src = dedent("""\ + class V: + def __init__(self): + pass + + if 1: + c = 3 + + def a_func(): + return 1 + + # COMMENT + a_func()""") + check_p(src) + assert [d.name for d in Script(src).infer()] == ['int'] + + +def test_add_to_end(Script): + """ + The diff parser doesn't parse everything again. It just updates with the + help of caches, this is an example that didn't work. + """ + + a = dedent("""\ + class Abc(): + def abc(self): + self.x = 3 + + class Two(Abc): + def g(self): + self + """) # ^ here is the first completion + + b = " def h(self):\n" \ + " self." + + def complete(code, line=None, column=None): + script = Script(code, path='example.py') + assert script.complete(line, column) + + complete(a, 7, 12) + complete(a + b) + + a = a[:-1] + '.\n' + complete(a, 7, 13) + complete(a + b) + + +def test_tokenizer_with_string_literal_backslash(Script): + c = Script("statement = u'foo\\\n'; statement").infer() + assert c[0]._name._value.get_safe_value() == 'foo' + + +def test_ellipsis_without_getitem(Script, environment): + def_, = Script('x=...;x').infer() + + assert def_.name == 'ellipsis' diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_parso_integration/test_error_correction.py b/bundle/jedi-vim/pythonx/jedi/test/test_parso_integration/test_error_correction.py new file mode 100644 index 000000000..b7796817b --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_parso_integration/test_error_correction.py @@ -0,0 +1,50 @@ +from textwrap import dedent + + +def test_error_correction_with(Script): + source = """ + with open() as f: + try: + f.""" + comps = Script(source).complete() + assert len(comps) > 30 + # `open` completions have a closed attribute. 
+ assert [1 for c in comps if c.name == 'closed'] + + +def test_string_literals(Script): + """Simplified case of jedi-vim#377.""" + source = dedent(""" + x = ur''' + + def foo(): + pass + """) + + script = Script(dedent(source)) + assert script._get_module_context().tree_node.end_pos == (6, 0) + assert not script.complete() + + +def test_incomplete_function(Script): + source = '''return ImportErr''' + + script = Script(dedent(source)) + assert script.complete(1, 3) + + +def test_decorator_string_issue(Script): + """ + Test case from #589 + """ + source = dedent('''\ + """ + @""" + def bla(): + pass + + bla.''') + + s = Script(source) + assert s.complete() + assert s._get_module_context().tree_node.get_code() == source diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_parso_integration/test_parser_utils.py b/bundle/jedi-vim/pythonx/jedi/test/test_parso_integration/test_parser_utils.py new file mode 100644 index 000000000..d29bf7519 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_parso_integration/test_parser_utils.py @@ -0,0 +1,88 @@ +import gc +from pathlib import Path + +from jedi import parser_utils +from parso import parse +from parso.cache import parser_cache +from parso.python import tree + +import pytest + + +class TestCallAndName: + def get_call(self, source): + # Get the simple_stmt and then the first one. 
+ node = parse(source).children[0] + if node.type == 'simple_stmt': + return node.children[0] + return node + + def test_name_and_call_positions(self): + name = self.get_call('name\nsomething_else') + assert name.value == 'name' + assert name.start_pos == (1, 0) + assert name.end_pos == (1, 4) + + leaf = self.get_call('1.0\n') + assert leaf.value == '1.0' + assert parser_utils.safe_literal_eval(leaf.value) == 1.0 + assert leaf.start_pos == (1, 0) + assert leaf.end_pos == (1, 3) + + def test_call_type(self): + call = self.get_call('hello') + assert isinstance(call, tree.Name) + + def test_literal_type(self): + literal = self.get_call('1.0') + assert isinstance(literal, tree.Literal) + assert type(parser_utils.safe_literal_eval(literal.value)) == float + + literal = self.get_call('1') + assert isinstance(literal, tree.Literal) + assert type(parser_utils.safe_literal_eval(literal.value)) == int + + literal = self.get_call('"hello"') + assert isinstance(literal, tree.Literal) + assert parser_utils.safe_literal_eval(literal.value) == 'hello' + + +def test_hex_values_in_docstring(): + source = r''' + def foo(object): + """ + \xff + """ + return 1 + ''' + + doc = parser_utils.clean_scope_docstring(next(parse(source).iter_funcdefs())) + assert doc == '\xff' + + +@pytest.mark.parametrize( + 'code,signature', [ + ('def my_function(x, typed: Type, z):\n return', 'my_function(x, typed: Type, z)'), + ('def my_function(x, y, z) -> str:\n return', 'my_function(x, y, z) -> str'), + ('lambda x, y, z: x + y * z\n', '(x, y, z)') + ]) +def test_get_signature(code, signature): + node = parse(code, version='3.8').children[0] + if node.type == 'simple_stmt': + node = node.children[0] + assert parser_utils.get_signature(node) == signature + + +def test_parser_cache_clear(Script): + """ + If parso clears its cache, Jedi should not keep those resources, they + should be freed. 
+ """ + script = Script("a = abs\na", path=Path(__file__).parent / 'parser_cache_test_foo.py') + script.complete() + module_id = id(script._module_node) + del parser_cache[script._inference_state.grammar._hashed][script.path] + del script + + gc.collect() + assert module_id not in [id(m) for m in gc.get_referrers(tree.Module)] diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_settings.py b/bundle/jedi-vim/pythonx/jedi/test/test_settings.py new file mode 100644 index 000000000..8ecc4fb84 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_settings.py @@ -0,0 +1,39 @@ +import pytest + +from jedi import settings +from jedi.inference.compiled import CompiledValueName +from jedi.inference.compiled.value import CompiledModule + + +@pytest.fixture() +def auto_import_json(monkeypatch): + monkeypatch.setattr(settings, 'auto_import_modules', ['json']) + + +def test_base_auto_import_modules(auto_import_json, Script): + loads, = Script('import json; json.loads').infer() + assert isinstance(loads._name, CompiledValueName) + value, = loads._name.infer() + assert isinstance(value.parent_context._value, CompiledModule) + + +def test_auto_import_modules_imports(auto_import_json, Script): + main, = Script('from json import tool; tool.main').infer() + assert isinstance(main._name, CompiledValueName) + + +def test_cropped_file_size(monkeypatch, get_names, Script): + code = 'class Foo(): pass\n' + monkeypatch.setattr( + settings, + '_cropped_file_size', + len(code) + ) + + foo, = get_names(code + code) + assert foo.line == 1 + + # It should just not crash if we are outside of the cropped range. 
+ script = Script(code + code + 'Foo') + assert not script.infer() + assert 'Foo' in [c.name for c in script.complete()] diff --git a/bundle/jedi-vim/pythonx/jedi/test/test_utils.py b/bundle/jedi-vim/pythonx/jedi/test/test_utils.py new file mode 100644 index 000000000..0dcf80db2 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi/test/test_utils.py @@ -0,0 +1,119 @@ +try: + import readline +except ImportError: + readline = False +import unittest + +from jedi import utils + + +@unittest.skipIf(not readline, "readline not found") +class TestSetupReadline(unittest.TestCase): + class NameSpace(object): + pass + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + self.namespace = self.NameSpace() + utils.setup_readline(self.namespace) + + def complete(self, text): + completer = readline.get_completer() + i = 0 + completions = [] + while True: + completion = completer(text, i) + if completion is None: + break + completions.append(completion) + i += 1 + return completions + + def test_simple(self): + assert self.complete('list') == ['list'] + assert self.complete('importerror') == ['ImportError'] + s = "print(BaseE" + assert self.complete(s) == [s + 'xception'] + + def test_nested(self): + assert self.complete('list.Insert') == ['list.insert'] + assert self.complete('list().Insert') == ['list().insert'] + + def test_magic_methods(self): + assert self.complete('list.__getitem__') == ['list.__getitem__'] + assert self.complete('list().__getitem__') == ['list().__getitem__'] + + def test_modules(self): + import sys + import os + self.namespace.sys = sys + self.namespace.os = os + + try: + assert self.complete('os.path.join') == ['os.path.join'] + string = 'os.path.join("a").upper' + assert self.complete(string) == [string] + + c = {'os.' 
+ d for d in dir(os) if d.startswith('ch')} + assert set(self.complete('os.ch')) == set(c) + finally: + del self.namespace.sys + del self.namespace.os + + def test_calls(self): + s = 'str(bytes' + assert self.complete(s) == [s, 'str(BytesWarning'] + + def test_import(self): + s = 'from os.path import a' + assert set(self.complete(s)) == {s + 'ltsep', s + 'bspath'} + assert self.complete('import keyword') == ['import keyword'] + + import os + s = 'from os import ' + goal = {s + el for el in dir(os)} + # There are minor differences, e.g. the dir doesn't include deleted + # items as well as items that are not only available on linux. + difference = set(self.complete(s)).symmetric_difference(goal) + difference = { + x for x in difference + if all(not x.startswith('from os import ' + s) + for s in ['_', 'O_', 'EX_', 'MFD_', 'SF_', 'ST_', + 'CLD_', 'POSIX_SPAWN_', 'P_', 'RWF_', + 'SCHED_']) + } + # There are quite a few differences, because both Windows and Linux + # (posix and nt) librariesare included. + assert len(difference) < 30 + + def test_local_import(self): + s = 'import test.test_utils' + assert self.complete(s) == [s] + + def test_preexisting_values(self): + self.namespace.a = range(10) + assert set(self.complete('a.')) == {'a.' + n for n in dir(range(1))} + del self.namespace.a + + def test_colorama(self): + """ + Only test it if colorama library is available. + + This module is being tested because it uses ``setattr`` at some point, + which Jedi doesn't understand, but it should still work in the REPL. 
+ """ + try: + # if colorama is installed + import colorama + except ImportError: + pass + else: + self.namespace.colorama = colorama + assert self.complete('colorama') + assert self.complete('colorama.Fore.BLACK') == ['colorama.Fore.BLACK'] + del self.namespace.colorama + + +def test_version_info(): + assert utils.version_info()[:2] > (0, 7) diff --git a/bundle/jedi-vim/pythonx/jedi_vim.py b/bundle/jedi-vim/pythonx/jedi_vim.py new file mode 100644 index 000000000..b140668ac --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi_vim.py @@ -0,0 +1,1211 @@ +# -*- coding: utf-8 -*- +""" +The Python parts of the Jedi library for VIM. It is mostly about communicating +with VIM. +""" + +from typing import Optional +import traceback # for exception output +import re +import os +import sys +from shlex import split as shsplit +from contextlib import contextmanager +from pathlib import Path +try: + from itertools import zip_longest +except ImportError: + from itertools import izip_longest as zip_longest # Python 2 + +import vim + +is_py3 = sys.version_info[0] >= 3 +if is_py3: + ELLIPSIS = "…" + unicode = str +else: + ELLIPSIS = u"…" + + +try: + # Somehow sys.prefix is set in combination with VIM and virtualenvs. + # However the sys path is not affected. Just reset it to the normal value. + sys.prefix = sys.base_prefix + sys.exec_prefix = sys.base_exec_prefix +except AttributeError: + # If we're not in a virtualenv we don't care. Everything is fine. + pass + + +class PythonToVimStr(unicode): + """ Vim has a different string implementation of single quotes """ + __slots__ = [] + + def __new__(cls, obj, encoding='UTF-8'): + if not (is_py3 or isinstance(obj, unicode)): + obj = unicode.__new__(cls, obj, encoding) + + # Vim cannot deal with zero bytes: + obj = obj.replace('\0', '\\0') + return unicode.__new__(cls, obj) + + def __repr__(self): + # this is totally stupid and makes no sense but vim/python unicode + # support is pretty bad. don't ask how I came up with this... 
It just + # works... + # It seems to be related to that bug: http://bugs.python.org/issue5876 + if unicode is str: + s = self + else: + s = self.encode('UTF-8') + return '"%s"' % s.replace('\\', '\\\\').replace('"', r'\"') + + +class VimError(Exception): + def __init__(self, message, throwpoint, executing): + super(type(self), self).__init__(message) + self.message = message + self.throwpoint = throwpoint + self.executing = executing + + def __str__(self): + return "{}; created by {!r} (in {})".format( + self.message, self.executing, self.throwpoint + ) + + +def _catch_exception(string, is_eval): + """ + Interface between vim and python calls back to it. + Necessary, because the exact error message is not given by `vim.error`. + """ + result = vim.eval('jedi#_vim_exceptions({0}, {1})'.format( + repr(PythonToVimStr(string, 'UTF-8')), int(is_eval))) + if 'exception' in result: + raise VimError(result['exception'], result['throwpoint'], string) + return result['result'] + + +def vim_command(string): + _catch_exception(string, is_eval=False) + + +def vim_eval(string): + return _catch_exception(string, is_eval=True) + + +def no_jedi_warning(error=None): + vim.command('echohl WarningMsg') + vim.command('echom "Please install Jedi if you want to use jedi-vim."') + if error: + vim.command('echom "The error was: {0}"'.format(error)) + vim.command('echohl None') + + +def echo_highlight(msg): + vim_command('echohl WarningMsg | echom "jedi-vim: {0}" | echohl None'.format( + str(msg).replace('"', '\\"'))) + + +jedi_path = os.path.join(os.path.dirname(__file__), 'jedi') +sys.path.insert(0, jedi_path) +parso_path = os.path.join(os.path.dirname(__file__), 'parso') +sys.path.insert(0, parso_path) + +try: + import jedi +except ImportError: + jedi = None + jedi_import_error = sys.exc_info() +else: + try: + version = jedi.__version__ + except Exception as e: # e.g. AttributeError + echo_highlight( + "Error when loading the jedi python module ({0}). 
" + "Please ensure that Jedi is installed correctly (see Installation " + "in the README.".format(e)) + jedi = None + else: + if isinstance(version, str): + # the normal use case, now. + from jedi import utils + version = utils.version_info() + if version < (0, 7): + echo_highlight('Please update your Jedi version, it is too old.') +finally: + sys.path.remove(jedi_path) + sys.path.remove(parso_path) + + +class VimCompat: + _eval_cache = {} + _func_cache = {} + + @classmethod + def has(cls, what): + try: + return cls._eval_cache[what] + except KeyError: + ret = cls._eval_cache[what] = cls.call('has', what) + return ret + + @classmethod + def call(cls, func, *args): + try: + f = cls._func_cache[func] + except KeyError: + if IS_NVIM: + f = cls._func_cache[func] = getattr(vim.funcs, func) + else: + f = cls._func_cache[func] = vim.Function(func) + return f(*args) + + @classmethod + def setqflist(cls, items, title, context): + if cls.has('patch-7.4.2200'): # can set qf title. + what = {'title': title} + if cls.has('patch-8.0.0590'): # can set qf context + what['context'] = {'jedi_usages': context} + if cls.has('patch-8.0.0657'): # can set items via "what". + what['items'] = items + cls.call('setqflist', [], ' ', what) + else: + # Can set title (and maybe context), but needs two calls. 
+ cls.call('setqflist', items) + cls.call('setqflist', items, 'a', what) + else: + cls.call('setqflist', items) + + @classmethod + def setqflist_title(cls, title): + if cls.has('patch-7.4.2200'): + cls.call('setqflist', [], 'a', {'title': title}) + + @classmethod + def can_update_current_qflist_for_context(cls, context): + if cls.has('patch-8.0.0590'): # can set qf context + return cls.call('getqflist', {'context': 1})['context'] == { + 'jedi_usages': context, + } + + +def catch_and_print_exceptions(func): + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except (Exception, vim.error): + print(traceback.format_exc()) + return None + return wrapper + + +def _check_jedi_availability(show_error=False): + def func_receiver(func): + def wrapper(*args, **kwargs): + if jedi is None: + if show_error: + no_jedi_warning() + return + else: + return func(*args, **kwargs) + return wrapper + return func_receiver + + +# Tuple of cache key / project +_current_project_cache = None, None + + +def get_project(): + vim_environment_path = vim_eval( + "get(b:, 'jedi_environment_path', g:jedi#environment_path)" + ) + vim_project_path = vim_eval("g:jedi#project_path") + + vim_added_sys_path = vim_eval("get(g:, 'jedi#added_sys_path', [])") + vim_added_sys_path += vim_eval("get(b:, 'jedi_added_sys_path', [])") + + global _current_project_cache + cache_key = dict(project_path=vim_project_path, + environment_path=vim_environment_path, + added_sys_path=vim_added_sys_path) + if cache_key == _current_project_cache[0]: + return _current_project_cache[1] + + if vim_environment_path in ("auto", "", None): + environment_path = None + else: + environment_path = vim_environment_path + + if vim_project_path in ("auto", "", None): + project_path = jedi.get_default_project().path + else: + project_path = vim_project_path + + project = jedi.Project(project_path, + environment_path=environment_path, + added_sys_path=vim_added_sys_path) + + _current_project_cache = cache_key, project + 
return project + + +@catch_and_print_exceptions +def choose_environment(): + args = shsplit(vim.eval('a:args')) + + envs = list(jedi.find_system_environments()) + envs.extend(jedi.find_virtualenvs(paths=args or None)) + + env_paths = [env.executable for env in envs] + + vim_command('belowright new') + vim.current.buffer[:] = env_paths + vim.current.buffer.name = "Hit Enter to Choose an Environment" + vim_command( + 'setlocal buftype=nofile bufhidden=wipe noswapfile nobuflisted readonly nomodifiable') + vim_command('noremap :bw') + vim_command('noremap :python3 jedi_vim.choose_environment_hit_enter()') + + +@catch_and_print_exceptions +def choose_environment_hit_enter(): + vim.vars['jedi#environment_path'] = vim.current.line + vim_command('bd') + + +@catch_and_print_exceptions +def load_project(): + path = vim.eval('a:args') + vim.vars['jedi#project_path'] = path + env_path = vim_eval("g:jedi#environment_path") + if env_path == 'auto': + env_path = None + if path: + try: + project = jedi.Project.load(path) + except FileNotFoundError: + project = jedi.Project(path, environment_path=env_path) + project.save() + else: + project = jedi.get_default_project() + path = project.path + project.save() + + global _current_project_cache + cache_key = dict(project_path=path, + environment_path=env_path, + added_sys_path=[]) + _current_project_cache = cache_key, project + + +@catch_and_print_exceptions +def get_script(source=None): + jedi.settings.additional_dynamic_modules = [ + b.name for b in vim.buffers if ( + b.name is not None and + b.name.endswith('.py') and + b.options['buflisted'])] + if source is None: + source = '\n'.join(vim.current.buffer) + buf_path = vim.current.buffer.name + if not buf_path: + # If a buffer has no name its name is an empty string. 
+ buf_path = None + + return jedi.Script(source, path=buf_path, project=get_project()) + + +def get_pos(column=None): + row = vim.current.window.cursor[0] + if column is None: + column = vim.current.window.cursor[1] + return row, column + + +@_check_jedi_availability(show_error=False) +@catch_and_print_exceptions +def completions(): + jedi.settings.case_insensitive_completion = \ + bool(int(vim_eval("get(b:, 'jedi_case_insensitive_completion', " + "g:jedi#case_insensitive_completion)"))) + + row, column = vim.current.window.cursor + # Clear call signatures in the buffer so they aren't seen by the completer. + # Call signatures in the command line can stay. + if int(vim_eval("g:jedi#show_call_signatures")) == 1: + clear_call_signatures() + if vim.eval('a:findstart') == '1': + count = 0 + for char in reversed(vim.current.line[:column]): + if not re.match(r'[\w\d]', char): + break + count += 1 + vim.command('return %i' % (column - count)) + else: + base = vim.eval('a:base') + source = '' + for i, line in enumerate(vim.current.buffer): + # enter this path again, otherwise source would be incomplete + if i == row - 1: + source += line[:column] + base + line[column:] + else: + source += line + source += '\n' + # here again hacks, because jedi has a different interface than vim + column += len(base) + try: + script = get_script(source=source) + completions = script.complete(*get_pos(column)) + signatures = script.get_signatures(*get_pos(column)) + + add_info = "preview" in vim.eval("&completeopt").split(",") + out = [] + for c in completions: + d = dict(word=PythonToVimStr(c.name[:len(base)] + c.complete), + abbr=PythonToVimStr(c.name_with_symbols), + # stuff directly behind the completion + menu=PythonToVimStr(c.description), + icase=1, # case insensitive + dup=1 # allow duplicates (maybe later remove this) + ) + if add_info: + try: + d["info"] = PythonToVimStr(c.docstring()) + except Exception: + print("jedi-vim: error with docstring for %r: %s" % ( + c, 
traceback.format_exc())) + out.append(d) + + strout = str(out) + except Exception: + # print to stdout, will be in :messages + print(traceback.format_exc()) + strout = '' + completions = [] + signatures = [] + + show_call_signatures(signatures) + vim.command('return ' + strout) + + +@contextmanager +def tempfile(content): + # Using this instead of the tempfile module because Windows won't read + # from a file not yet written to disk + with open(vim_eval('tempname()'), 'w') as f: + f.write(content) + try: + yield f + finally: + os.unlink(f.name) + + +@_check_jedi_availability(show_error=True) +@catch_and_print_exceptions +def goto(mode="goto"): + """ + :param str mode: "definition", "assignment", "goto" + :rtype: list of jedi.api.classes.Name + """ + script = get_script() + pos = get_pos() + if mode == "goto": + names = script.goto(*pos, follow_imports=True) + elif mode == "definition": + names = script.infer(*pos) + elif mode == "assignment": + names = script.goto(*pos) + elif mode == "stubs": + names = script.goto(*pos, follow_imports=True, only_stubs=True) + + if not names: + echo_highlight("Couldn't find any definitions for this.") + elif len(names) == 1 and mode != "related_name": + n = list(names)[0] + _goto_specific_name(n) + else: + show_goto_multi_results(names, mode) + return names + + +def _goto_specific_name(n, options=''): + if n.column is None: + if n.is_keyword: + echo_highlight("Cannot get the definition of Python keywords.") + else: + name = 'Namespaces' if n.type == 'namespace' else 'Builtin modules' + echo_highlight( + "%s cannot be displayed (%s)." 
+ % (name, n.full_name or n.name) + ) + else: + using_tagstack = int(vim_eval('g:jedi#use_tag_stack')) == 1 + result = set_buffer(n.module_path, options=options, + using_tagstack=using_tagstack) + if not result: + return [] + if (using_tagstack and n.module_path and + n.module_path.exists()): + tagname = n.name + with tempfile('{0}\t{1}\t{2}'.format( + tagname, n.module_path, 'call cursor({0}, {1})'.format( + n.line, n.column + 1))) as f: + old_tags = vim.eval('&tags') + old_wildignore = vim.eval('&wildignore') + try: + # Clear wildignore to ensure tag file isn't ignored + vim.command('set wildignore=') + vim.command('let &tags = %s' % + repr(PythonToVimStr(f.name))) + vim.command('tjump %s' % tagname) + finally: + vim.command('let &tags = %s' % + repr(PythonToVimStr(old_tags))) + vim.command('let &wildignore = %s' % + repr(PythonToVimStr(old_wildignore))) + vim.current.window.cursor = n.line, n.column + + +def relpath(path): + """Make path relative to cwd if it is below.""" + abspath = os.path.abspath(path) + if abspath.startswith(os.getcwd()): + return os.path.relpath(path) + return path + + +def annotate_description(n): + code = n.get_line_code().strip() + if n.type == 'statement': + return code + if n.type == 'function': + if code.startswith('def'): + return code + typ = 'def' + else: + typ = n.type + return '[%s] %s' % (typ, code) + + +def show_goto_multi_results(names, mode): + """Create (or reuse) a quickfix list for multiple names.""" + global _current_names + + lst = [] + (row, col) = vim.current.window.cursor + current_idx = None + current_def = None + for n in names: + if n.column is None: + # Typically a namespace, in the future maybe other things as + # well. + lst.append(dict(text=PythonToVimStr(n.description))) + else: + text = annotate_description(n) + lst.append(dict(filename=PythonToVimStr(relpath(str(n.module_path))), + lnum=n.line, col=n.column + 1, + text=PythonToVimStr(text))) + + # Select current/nearest entry via :cc later. 
+ if n.line == row and n.column <= col: + if (current_idx is None + or (abs(lst[current_idx]["col"] - col) + > abs(n.column - col))): + current_idx = len(lst) + current_def = n + + # Build qflist title. + qf_title = mode + if current_def is not None: + if current_def.full_name: + qf_title += ": " + current_def.full_name + else: + qf_title += ": " + str(current_def) + select_entry = current_idx + else: + select_entry = 0 + + qf_context = id(names) + if (_current_names + and VimCompat.can_update_current_qflist_for_context(qf_context)): + # Same list, only adjust title/selected entry. + VimCompat.setqflist_title(qf_title) + vim_command('%dcc' % select_entry) + else: + VimCompat.setqflist(lst, title=qf_title, context=qf_context) + for_usages = mode == "usages" + vim_eval('jedi#add_goto_window(%d, %d)' % (for_usages, len(lst))) + vim_command('%d' % select_entry) + + +def _same_names(a, b): + """Compare without _inference_state. + + Ref: https://github.com/davidhalter/jedi-vim/issues/952) + """ + return all( + x._name.start_pos == y._name.start_pos + and x.module_path == y.module_path + and x.name == y.name + for x, y in zip(a, b) + ) + + +@catch_and_print_exceptions +def usages(visuals=True): + script = get_script() + names = script.get_references(*get_pos()) + if not names: + echo_highlight("No usages found here.") + return names + + if visuals: + global _current_names + + if _current_names: + if _same_names(_current_names, names): + names = _current_names + else: + clear_usages() + assert not _current_names + + show_goto_multi_results(names, "usages") + if not _current_names: + _current_names = names + highlight_usages() + else: + assert names is _current_names # updated above + return names + + +_current_names = None +"""Current definitions to use for highlighting.""" +_pending_names = {} +"""Pending definitions for unloaded buffers.""" +_placed_names_in_buffers = set() +"""Set of buffers for faster cleanup.""" + + +IS_NVIM = hasattr(vim, 'from_nvim') +if IS_NVIM: + 
vim_prop_add = None +else: + vim_prop_type_added = False + try: + vim_prop_add = vim.Function("prop_add") + except ValueError: + vim_prop_add = None + else: + vim_prop_remove = vim.Function("prop_remove") + + +def clear_usages(): + """Clear existing highlights.""" + global _current_names + if _current_names is None: + return + _current_names = None + + if IS_NVIM: + for buf in _placed_names_in_buffers: + src_ids = buf.vars.get('_jedi_usages_src_ids') + if src_ids is not None: + for src_id in src_ids: + buf.clear_highlight(src_id) + elif vim_prop_add: + for buf in _placed_names_in_buffers: + vim_prop_remove({ + 'type': 'jediUsage', + 'all': 1, + 'bufnr': buf.number, + }) + else: + # Unset current window only. + assert _current_names is None + highlight_usages_for_vim_win() + + _placed_names_in_buffers.clear() + + +def highlight_usages(): + """Set usage names to be highlighted. + + With Neovim it will use the nvim_buf_add_highlight API to highlight all + buffers already. + + With Vim without support for text-properties only the current window is + highlighted via matchaddpos, and autocommands are setup to highlight other + windows on demand. Otherwise Vim's text-properties are used. + """ + global _current_names, _pending_names + + names = _current_names + _pending_names = {} + + if IS_NVIM or vim_prop_add: + bufs = {x.name: x for x in vim.buffers} + defs_per_buf = {} + for name in names: + try: + buf = bufs[str(name.module_path)] + except KeyError: + continue + defs_per_buf.setdefault(buf, []).append(name) + + if IS_NVIM: + # We need to remember highlight ids with Neovim's API. 
+ buf_src_ids = {} + for buf, names in defs_per_buf.items(): + buf_src_ids[buf] = [] + for name in names: + src_id = _add_highlighted_name(buf, name) + buf_src_ids[buf].append(src_id) + for buf, src_ids in buf_src_ids.items(): + buf.vars['_jedi_usages_src_ids'] = src_ids + else: + for buf, names in defs_per_buf.items(): + try: + for name in names: + _add_highlighted_name(buf, name) + except vim.error as exc: + if exc.args[0].startswith('Vim:E275:'): + # "Cannot add text property to unloaded buffer" + _pending_names.setdefault(buf.name, []).extend( + names) + else: + highlight_usages_for_vim_win() + + +def _handle_pending_usages_for_buf(): + """Add (pending) highlights for the current buffer (Vim with textprops).""" + buf = vim.current.buffer + bufname = buf.name + try: + buf_names = _pending_names[bufname] + except KeyError: + return + for name in buf_names: + _add_highlighted_name(buf, name) + del _pending_names[bufname] + + +def _add_highlighted_name(buf, name): + lnum = name.line + start_col = name.column + + # Skip highlighting of module definitions that point to the start + # of the file. + if name.type == 'module' and lnum == 1 and start_col == 0: + return + + _placed_names_in_buffers.add(buf) + + # TODO: validate that name.name is at this position? + # Would skip the module definitions from above already. + + length = len(name.name) + if vim_prop_add: + # XXX: needs jediUsage highlight (via after/syntax/python.vim). + global vim_prop_type_added + if not vim_prop_type_added: + vim.eval("prop_type_add('jediUsage', {'highlight': 'jediUsage'})") + vim_prop_type_added = True + vim_prop_add(lnum, start_col+1, { + 'type': 'jediUsage', + 'bufnr': buf.number, + 'length': length, + }) + return + + assert IS_NVIM + end_col = name.column + length + src_id = buf.add_highlight('jediUsage', lnum-1, start_col, end_col, + src_id=0) + return src_id + + +def highlight_usages_for_vim_win(): + """Highlight usages in the current window. 
+ + It stores the matchids in a window-local variable. + + (matchaddpos() only works for the current window.) + """ + win = vim.current.window + + cur_matchids = win.vars.get('_jedi_usages_vim_matchids') + if cur_matchids: + if cur_matchids[0] == vim.current.buffer.number: + return + + # Need to clear non-matching highlights. + for matchid in cur_matchids[1:]: + expr = 'matchdelete(%d)' % int(matchid) + vim.eval(expr) + + matchids = [] + if _current_names: + buffer_path = vim.current.buffer.name + for name in _current_names: + if (str(name.module_path) or '') == buffer_path: + positions = [ + [name.line, + name.column + 1, + len(name.name)] + ] + expr = "matchaddpos('jediUsage', %s)" % repr(positions) + matchids.append(int(vim_eval(expr))) + + if matchids: + vim.current.window.vars['_jedi_usages_vim_matchids'] = [ + vim.current.buffer.number] + matchids + elif cur_matchids is not None: + # Always set it (uses an empty list for "unset", which is not possible + # using del). + vim.current.window.vars['_jedi_usages_vim_matchids'] = [] + + # Remember if clearing is needed for later buffer autocommands. 
+ vim.current.buffer.vars['_jedi_usages_needs_clear'] = bool(matchids) + + +@_check_jedi_availability(show_error=True) +@catch_and_print_exceptions +def show_documentation(): + script = get_script() + try: + names = script.help(*get_pos()) + except Exception: + # print to stdout, will be in :messages + names = [] + print("Exception, this shouldn't happen.") + print(traceback.format_exc()) + + if not names: + echo_highlight('No documentation found for that.') + vim.command('return') + return + + docs = [] + for n in names: + doc = n.docstring() + if doc: + title = 'Docstring for %s %s' % (n.type, n.full_name or n.name) + underline = '=' * len(title) + docs.append('%s\n%s\n%s' % (title, underline, doc)) + else: + docs.append('|No Docstring for %s|' % n) + text = ('\n' + '-' * 79 + '\n').join(docs) + vim.command('let l:doc = %s' % repr(PythonToVimStr(text))) + vim.command('let l:doc_lines = %s' % len(text.split('\n'))) + return True + + +@catch_and_print_exceptions +def clear_call_signatures(): + # Check if using command line call signatures + if int(vim_eval("g:jedi#show_call_signatures")) == 2: + vim_command('echo ""') + return + cursor = vim.current.window.cursor + e = vim_eval('g:jedi#call_signature_escape') + # We need two turns here to search and replace certain lines: + # 1. Search for a line with a call signature and save the appended + # characters + # 2. Actually replace the line and redo the status quo. + py_regex = r'%sjedi=([0-9]+), (.*?)%s.*?%sjedi%s'.replace( + '%s', re.escape(e)) + for i, line in enumerate(vim.current.buffer): + match = re.search(py_regex, line) + if match is not None: + # Some signs were added to minimize syntax changes due to call + # signatures. We have to remove them again. The number of them is + # specified in `match.group(1)`. 
+ after = line[match.end() + int(match.group(1)):] + line = line[:match.start()] + match.group(2) + after + vim.current.buffer[i] = line + vim.current.window.cursor = cursor + + +@_check_jedi_availability(show_error=False) +@catch_and_print_exceptions +def show_call_signatures(signatures=()): + if int(vim_eval("has('conceal') && g:jedi#show_call_signatures")) == 0: + return + + # We need to clear the signatures before we calculate them again. The + # reason for this is that call signatures are unfortunately written to the + # buffer. + clear_call_signatures() + if signatures == (): + signatures = get_script().get_signatures(*get_pos()) + + if not signatures: + return + + if int(vim_eval("g:jedi#show_call_signatures")) == 2: + return cmdline_call_signatures(signatures) + + seen_sigs = [] + for i, signature in enumerate(signatures): + line, column = signature.bracket_start + # signatures are listed above each other + line_to_replace = line - i - 1 + # because there's a space before the bracket + insert_column = column - 1 + if insert_column < 0 or line_to_replace <= 0: + # Edge cases, when the call signature has no space on the screen. + break + + # TODO check if completion menu is above or below + line = vim_eval("getline(%s)" % line_to_replace) + + # Descriptions are usually looking like `param name`, remove the param. + params = [p.description.replace('\n', '').replace('param ', '', 1) + for p in signature.params] + try: + # *_*PLACEHOLDER*_* makes something fat. See after/syntax file. + params[signature.index] = '*_*%s*_*' % params[signature.index] + except (IndexError, TypeError): + pass + + # Skip duplicates. + if params in seen_sigs: + continue + seen_sigs.append(params) + + # This stuff is reaaaaally a hack! I cannot stress enough, that + # this is a stupid solution. But there is really no other yet. + # There is no possibility in VIM to draw on the screen, but there + # will be one (see :help todo Patch to access screen under Python. 
+ # (Marko Mahni, 2010 Jul 18)) + text = " (%s) " % ', '.join(params) + text = ' ' * (insert_column - len(line)) + text + end_column = insert_column + len(text) - 2 # -2 due to bold symbols + + # Need to decode it with utf8, because vim returns always a python 2 + # string even if it is unicode. + e = vim_eval('g:jedi#call_signature_escape') + if hasattr(e, 'decode'): + e = e.decode('UTF-8') + # replace line before with cursor + regex = "xjedi=%sx%sxjedix".replace('x', e) + + prefix, replace = line[:insert_column], line[insert_column:end_column] + + # Check the replace stuff for strings, to append them + # (don't want to break the syntax) + regex_quotes = r'''\\*["']+''' + # `add` are all the quotation marks. + # join them with a space to avoid producing ''' + add = ' '.join(re.findall(regex_quotes, replace)) + # search backwards + if add and replace[0] in ['"', "'"]: + a = re.search(regex_quotes + '$', prefix) + add = ('' if a is None else a.group(0)) + add + + tup = '%s, %s' % (len(add), replace) + repl = prefix + (regex % (tup, text)) + add + line[end_column:] + + vim_eval('setline(%s, %s)' % (line_to_replace, repr(PythonToVimStr(repl)))) + + +@catch_and_print_exceptions +def cmdline_call_signatures(signatures): + def get_params(s): + return [p.description.replace('\n', '').replace('param ', '', 1) for p in s.params] + + def escape(string): + return string.replace('"', '\\"').replace(r'\n', r'\\n') + + def join(): + return ', '.join(filter(None, (left, center, right))) + + def too_long(): + return len(join()) > max_msg_len + + if len(signatures) > 1: + params = zip_longest(*map(get_params, signatures), fillvalue='_') + params = ['(' + ', '.join(p) + ')' for p in params] + else: + params = get_params(signatures[0]) + + index = next(iter(s.index for s in signatures if s.index is not None), None) + + # Allow 12 characters for showcmd plus 18 for ruler - setting + # noruler/noshowcmd here causes incorrect undo history + max_msg_len = int(vim_eval('&columns')) - 12 + 
if int(vim_eval('&ruler')): + max_msg_len -= 18 + max_msg_len -= len(signatures[0].name) + 2 # call name + parentheses + + if max_msg_len < (1 if params else 0): + return + elif index is None: + text = escape(', '.join(params)) + if params and len(text) > max_msg_len: + text = ELLIPSIS + elif max_msg_len < len(ELLIPSIS): + return + else: + left = escape(', '.join(params[:index])) + center = escape(params[index]) + right = escape(', '.join(params[index + 1:])) + while too_long(): + if left and left != ELLIPSIS: + left = ELLIPSIS + continue + if right and right != ELLIPSIS: + right = ELLIPSIS + continue + if (left or right) and center != ELLIPSIS: + left = right = None + center = ELLIPSIS + continue + if too_long(): + # Should never reach here + return + + max_num_spaces = max_msg_len + if index is not None: + max_num_spaces -= len(join()) + _, column = signatures[0].bracket_start + spaces = min(int(vim_eval('g:jedi#first_col +' + 'wincol() - col(".")')) + + column - len(signatures[0].name), + max_num_spaces) * ' ' + + if index is not None: + vim_command(' echon "%s" | ' + 'echohl Function | echon "%s" | ' + 'echohl None | echon "(" | ' + 'echohl jediFunction | echon "%s" | ' + 'echohl jediFat | echon "%s" | ' + 'echohl jediFunction | echon "%s" | ' + 'echohl None | echon ")"' + % (spaces, signatures[0].name, + left + ', ' if left else '', + center, ', ' + right if right else '')) + else: + vim_command(' echon "%s" | ' + 'echohl Function | echon "%s" | ' + 'echohl None | echon "(%s)"' + % (spaces, signatures[0].name, text)) + + +@_check_jedi_availability(show_error=True) +@catch_and_print_exceptions +def rename(): + if not int(vim.eval('a:0')): + # Need to save the cursor position before insert mode + cursor = vim.current.window.cursor + changenr = vim.eval('changenr()') # track undo tree + vim_command('augroup jedi_rename') + vim_command('autocmd InsertLeave call jedi#rename' + '({}, {}, {})'.format(cursor[0], cursor[1], changenr)) + vim_command('augroup END') + + 
vim_command("let s:jedi_replace_orig = expand('')") + line = vim_eval('getline(".")') + vim_command('normal! diw') + if re.match(r'\w+$', line[cursor[1]:]): + # In case the deleted word is at the end of the line we need to + # move the cursor to the end. + vim_command('startinsert!') + else: + vim_command('startinsert') + + else: + # Remove autocommand. + vim_command('autocmd! jedi_rename InsertLeave') + + args = vim.eval('a:000') + cursor = tuple(int(x) for x in args[:2]) + changenr = args[2] + + # Get replacement, if there is something on the cursor. + # This won't be the case when the user ends insert mode right away, + # and `` would pick up the nearest word instead. + if vim_eval('getline(".")[getpos(".")[2]-1]') != ' ': + replace = vim_eval("expand('')") + else: + replace = None + + vim_command('undo {}'.format(changenr)) + + vim.current.window.cursor = cursor + + if replace: + return do_rename(replace) + + +def rename_visual(): + replace = vim.eval('input("Rename to: ")') + orig = vim.eval('getline(".")[(getpos("\'<")[2]-1):getpos("\'>")[2]]') + do_rename(replace, orig) + + +def do_rename(replace, orig=None): + if not len(replace): + echo_highlight('No rename possible without name.') + return + + if orig is None: + orig = vim_eval('s:jedi_replace_orig') + + # Save original window / tab. + saved_tab = int(vim_eval('tabpagenr()')) + saved_win = int(vim_eval('winnr()')) + + temp_rename = usages(visuals=False) + # Sort the whole thing reverse (positions at the end of the line + # must be first, because they move the stuff before the position). + temp_rename = sorted(temp_rename, reverse=True, + key=lambda x: (str(x.module_path), x.line, x.column)) + buffers = set() + for r in temp_rename: + if r.in_builtin_module(): + continue + + result = set_buffer(r.module_path) + if not result: + echo_highlight('Failed to create buffer window for %s!' % (r.module_path)) + continue + + buffers.add(vim.current.buffer.name) + + # Replace original word. 
+ r_line = vim.current.buffer[r.line - 1] + vim.current.buffer[r.line - 1] = (r_line[:r.column] + replace + + r_line[r.column + len(orig):]) + + # Restore previous tab and window. + vim_command('tabnext {0:d}'.format(saved_tab)) + vim_command('{0:d}wincmd w'.format(saved_win)) + + if len(buffers) > 1: + echo_highlight('Jedi did {0:d} renames in {1:d} buffers!'.format( + len(temp_rename), len(buffers))) + else: + echo_highlight('Jedi did {0:d} renames!'.format(len(temp_rename))) + + +@_check_jedi_availability(show_error=True) +@catch_and_print_exceptions +def py_import(): + args = shsplit(vim.eval('a:args')) + import_path = args.pop() + name = next(get_project().search(import_path), None) + if name is None: + echo_highlight('Cannot find %s in your project or on sys.path!' % import_path) + else: + cmd_args = ' '.join([a.replace(' ', '\\ ') for a in args]) + _goto_specific_name(name, options=cmd_args) + + +@catch_and_print_exceptions +def py_import_completions(): + argl = vim.eval('a:argl') + if jedi is None: + print('Pyimport completion requires jedi module: https://github.com/davidhalter/jedi') + comps = [] + else: + names = get_project().complete_search(argl) + comps = [argl + n for n in sorted(set(c.complete for c in names))] + vim.command("return '%s'" % '\n'.join(comps)) + + +@catch_and_print_exceptions +def set_buffer(path: Optional[Path], options='', using_tagstack=False): + """ + Opens a new buffer if we have to or does nothing. Returns True in case of + success. 
+ """ + path = str(path or '') + # Check both, because it might be an empty string + if path in (vim.current.buffer.name, os.path.abspath(vim.current.buffer.name)): + return True + + path = relpath(path) + # options are what you can to edit the edit options + if int(vim_eval('g:jedi#use_tabs_not_buffers')) == 1: + _tabnew(path, options) + elif not vim_eval('g:jedi#use_splits_not_buffers') in [1, '1']: + user_split_option = vim_eval('g:jedi#use_splits_not_buffers') + split_options = { + 'top': 'topleft split', + 'left': 'topleft vsplit', + 'right': 'botright vsplit', + 'bottom': 'botright split', + 'winwidth': 'vs' + } + if (user_split_option == 'winwidth' and + vim.current.window.width <= 2 * int(vim_eval( + "&textwidth ? &textwidth : 80"))): + split_options['winwidth'] = 'sp' + if user_split_option not in split_options: + print('Unsupported value for g:jedi#use_splits_not_buffers: {0}. ' + 'Valid options are: {1}.'.format( + user_split_option, ', '.join(split_options.keys()))) + else: + vim_command(split_options[user_split_option] + " %s" % escape_file_path(path)) + else: + if int(vim_eval("!&hidden && &modified")) == 1: + if not vim_eval("bufname('%')"): + echo_highlight('Cannot open a new buffer, use `:set hidden` or save your buffer') + return False + else: + vim_command('w') + if using_tagstack: + return True + vim_command('edit %s %s' % (options, escape_file_path(path))) + # sometimes syntax is being disabled and the filetype not set. + if int(vim_eval('!exists("g:syntax_on")')) == 1: + vim_command('syntax enable') + if int(vim_eval("&filetype != 'python'")) == 1: + vim_command('set filetype=python') + return True + + +@catch_and_print_exceptions +def _tabnew(path, options=''): + """ + Open a file in a new tab or switch to an existing one. + + :param options: `:tabnew` options, read vim help. 
+ """ + if int(vim_eval('has("gui")')) == 1: + vim_command('tab drop %s %s' % (options, escape_file_path(path))) + return + + for tab_nr in range(int(vim_eval("tabpagenr('$')"))): + for buf_nr in vim_eval("tabpagebuflist(%i + 1)" % tab_nr): + buf_nr = int(buf_nr) - 1 + try: + buf_path = vim.buffers[buf_nr].name + except (LookupError, ValueError): + # Just do good old asking for forgiveness. + # don't know why this happens :-) + pass + else: + if os.path.abspath(buf_path) == os.path.abspath(path): + # tab exists, just switch to that tab + vim_command('tabfirst | tabnext %i' % (tab_nr + 1)) + # Goto the buffer's window. + vim_command('exec bufwinnr(%i) . " wincmd w"' % (buf_nr + 1)) + break + else: + continue + break + else: + # tab doesn't exist, add a new one. + vim_command('tabnew %s' % escape_file_path(path)) + + +def escape_file_path(path): + return path.replace(' ', r'\ ') + + +def print_to_stdout(level, str_out): + print(str_out) diff --git a/bundle/jedi-vim/pythonx/jedi_vim_debug.py b/bundle/jedi-vim/pythonx/jedi_vim_debug.py new file mode 100644 index 000000000..2c6732255 --- /dev/null +++ b/bundle/jedi-vim/pythonx/jedi_vim_debug.py @@ -0,0 +1,96 @@ +"""Used in jedi-vim's jedi#debug_info()""" +import sys + +import vim +from jedi_vim import PythonToVimStr, jedi + + +def echo(msg): + vim.command('echo %r' % PythonToVimStr(msg)) + + +def echo_error(msg): + vim.command('echohl ErrorMsg') + echo(msg) + vim.command('echohl None') + + +def format_exc_info(exc_info=None, tb_indent=2): + import traceback + + if exc_info is None: + exc_info = sys.exc_info() + + exc_msg = traceback.format_exception_only(exc_info[0], exc_info[1]) + lines = ''.join(exc_msg).rstrip('\n').split('\n') + + lines.append('Traceback (most recent call last):') + tb = traceback.format_tb(exc_info[2]) + lines.extend(''.join(tb).rstrip('\n').split('\n')) + + indent = ' ' * tb_indent + return '{0}'.format(('\n' + indent).join(lines)) + + +def get_known_environments(): + """Get known Jedi 
environments.""" + envs = list(jedi.find_virtualenvs()) + envs.extend(jedi.find_system_environments()) + return envs + + +def display_debug_info(): + echo(' - global sys.executable: `{0}`'.format(sys.executable)) + echo(' - global sys.version: `{0}`'.format( + ', '.join([x.strip() + for x in sys.version.split('\n')]))) + echo(' - global site module: `{0}`'.format(__import__('site').__file__)) + + try: + import jedi_vim + except Exception: + echo_error('ERROR: could not import jedi_vim: {0}'.format( + format_exc_info())) + return + + if jedi_vim.jedi is None: + if hasattr(jedi_vim, 'jedi_import_error'): + error_msg = format_exc_info(jedi_vim.jedi_import_error) + else: + error_msg = 'unknown error' + echo_error('ERROR: could not import the "jedi" Python module: {0}'.format( + error_msg)) + else: + echo('\n##### Jedi\n\n - path: `{0}`'.format(jedi_vim.jedi.__file__)) + echo(' - version: {0}'.format(jedi_vim.jedi.__version__)) + + try: + project = jedi_vim.get_project() + environment = project.get_environment() + except AttributeError: + script_evaluator = jedi_vim.jedi.Script('')._evaluator + try: + sys_path = script_evaluator.project.sys_path + except AttributeError: + sys_path = script_evaluator.sys_path + else: + echo('\n##### Jedi environment: {0}\n\n'.format(environment)) + echo(' - executable: {0}'.format(environment.executable)) + try: + sys_path = environment.get_sys_path() + except Exception: + echo_error('ERROR: failed to get sys path from environment: {0}'.format( + format_exc_info())) + return + + echo(' - sys_path:') + for p in sys_path: + echo(' - `{0}`'.format(p)) + + if environment: + echo('\n##### Known environments\n\n') + for environment in get_known_environments(): + echo(' - {0} ({1})\n'.format( + environment, + environment.executable, + )) diff --git a/bundle/jedi-vim/pythonx/parso/.github/workflows/build.yml b/bundle/jedi-vim/pythonx/parso/.github/workflows/build.yml new file mode 100644 index 000000000..0de073e11 --- /dev/null +++ 
b/bundle/jedi-vim/pythonx/parso/.github/workflows/build.yml @@ -0,0 +1,68 @@ +name: Build + +on: [push, pull_request] + +env: + PYTEST_ADDOPTS: --color=yes + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: '3.8' + - name: Install dependencies + run: | + python -m pip install --upgrade pip setuptools wheel + pip install .[qa] + - name: Run Flake8 + # Ignore F401, which are unused imports. flake8 is a primitive tool and is sometimes wrong. + run: flake8 --extend-ignore F401 parso test/*.py setup.py scripts/ + - name: Run Mypy + run: mypy parso + test: + runs-on: ubuntu-latest + continue-on-error: ${{ matrix.experimental }} + strategy: + fail-fast: false + matrix: + python-version: ['3.6', '3.7', '3.8', '3.9'] + experimental: [false] + # include: + # - python-version: '3.10-dev' + # experimental: true + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip setuptools wheel + pip install .[testing] + - name: Run pytest + run: pytest + coverage: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: '3.8' + - name: Install dependencies + run: | + python -m pip install --upgrade pip setuptools wheel + pip install .[testing] coverage coveralls + - name: Run pytest with coverage + run: | + coverage run -m pytest + coverage report + - name: Upload coverage report to Coveralls + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: coveralls --service=github diff --git a/bundle/jedi-vim/pythonx/parso/.gitignore b/bundle/jedi-vim/pythonx/parso/.gitignore new file mode 100644 index 000000000..2d869e27b --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/.gitignore @@ -0,0 +1,14 
@@ +*~ +*.sw? +*.pyc +.coveralls.yml +.coverage +/build/ +/docs/_build/ +/dist/ +parso.egg-info/ +/.cache/ +/.pytest_cache +test/fuzz-redo.pickle +/venv/ +/htmlcov/ diff --git a/bundle/jedi-vim/pythonx/parso/AUTHORS.txt b/bundle/jedi-vim/pythonx/parso/AUTHORS.txt new file mode 100644 index 000000000..9737530ba --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/AUTHORS.txt @@ -0,0 +1,58 @@ +Main Authors +============ + +David Halter (@davidhalter) + +Code Contributors +================= +Alisdair Robertson (@robodair) +Bryan Forbes (@bryanforbes) + + +Code Contributors (to Jedi and therefore possibly to this library) +================================================================== + +Takafumi Arakaki (@tkf) +Danilo Bargen (@dbrgn) +Laurens Van Houtven (@lvh) <_@lvh.cc> +Aldo Stracquadanio (@Astrac) +Jean-Louis Fuchs (@ganwell) +tek (@tek) +Yasha Borevich (@jjay) +Aaron Griffin +andviro (@andviro) +Mike Gilbert (@floppym) +Aaron Meurer (@asmeurer) +Lubos Trilety +Akinori Hattori (@hattya) +srusskih (@srusskih) +Steven Silvester (@blink1073) +Colin Duquesnoy (@ColinDuquesnoy) +Jorgen Schaefer (@jorgenschaefer) +Fredrik Bergroth (@fbergroth) +Mathias Fußenegger (@mfussenegger) +Syohei Yoshida (@syohex) +ppalucky (@ppalucky) +immerrr (@immerrr) immerrr@gmail.com +Albertas Agejevas (@alga) +Savor d'Isavano (@KenetJervet) +Phillip Berndt (@phillipberndt) +Ian Lee (@IanLee1521) +Farkhad Khatamov (@hatamov) +Kevin Kelley (@kelleyk) +Sid Shanker (@squidarth) +Reinoud Elhorst (@reinhrst) +Guido van Rossum (@gvanrossum) +Dmytro Sadovnychyi (@sadovnychyi) +Cristi Burcă (@scribu) +bstaint (@bstaint) +Mathias Rav (@Mortal) +Daniel Fiterman (@dfit99) +Simon Ruggier (@sruggier) +Élie Gouzien (@ElieGouzien) +Tim Gates (@timgates42) +Batuhan Taskaya (@isidentical) +Jocelyn Boullier (@Kazy) + + +Note: (@user) means a github user name. 
diff --git a/bundle/jedi-vim/pythonx/parso/CHANGELOG.rst b/bundle/jedi-vim/pythonx/parso/CHANGELOG.rst new file mode 100644 index 000000000..815b8466c --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/CHANGELOG.rst @@ -0,0 +1,148 @@ +.. :changelog: + +Changelog +--------- + +Unreleased +++++++++++ + +0.8.3 (2021-11-30) +++++++++++++++++++ + +- Add basic support for Python 3.11 and 3.12 + +0.8.2 (2021-03-30) +++++++++++++++++++ + +- Various small bugfixes + +0.8.1 (2020-12-10) +++++++++++++++++++ + +- Various small bugfixes + +0.8.0 (2020-08-05) +++++++++++++++++++ + +- Dropped Support for Python 2.7, 3.4, 3.5 +- It's possible to use ``pathlib.Path`` objects now in the API +- The stubs are gone, we are now using annotations +- ``namedexpr_test`` nodes are now a proper class called ``NamedExpr`` +- A lot of smaller refactorings + +0.7.1 (2020-07-24) +++++++++++++++++++ + +- Fixed a couple of smaller bugs (mostly syntax error detection in + ``Grammar.iter_errors``) + +This is going to be the last release that supports Python 2.7, 3.4 and 3.5. + +0.7.0 (2020-04-13) +++++++++++++++++++ + +- Fix a lot of annoying bugs in the diff parser. The fuzzer did not find + issues anymore even after running it for more than 24 hours (500k tests). +- Small grammar change: suites can now contain newlines even after a newline. + This should really not matter if you don't use error recovery. It allows for + nicer error recovery. 
+ +0.6.2 (2020-02-27) +++++++++++++++++++ + +- Bugfixes +- Add Grammar.refactor (might still be subject to change until 0.7.0) + +0.6.1 (2020-02-03) +++++++++++++++++++ + +- Add ``parso.normalizer.Issue.end_pos`` to make it possible to know where an + issue ends + +0.6.0 (2020-01-26) +++++++++++++++++++ + +- Dropped Python 2.6/Python 3.3 support +- del_stmt names are now considered as a definition + (for ``name.is_definition()``) +- Bugfixes + +0.5.2 (2019-12-15) +++++++++++++++++++ + +- Add include_setitem to get_definition/is_definition and get_defined_names (#66) +- Fix named expression error listing (#89, #90) +- Fix some f-string tokenizer issues (#93) + +0.5.1 (2019-07-13) +++++++++++++++++++ + +- Fix: Some unicode identifiers were not correctly tokenized +- Fix: Line continuations in f-strings are now working + +0.5.0 (2019-06-20) +++++++++++++++++++ + +- **Breaking Change** comp_for is now called sync_comp_for for all Python + versions to be compatible with the Python 3.8 Grammar +- Added .pyi stubs for a lot of the parso API +- Small FileIO changes + +0.4.0 (2019-04-05) +++++++++++++++++++ + +- Python 3.8 support +- FileIO support, it's now possible to use abstract file IO, support is alpha + +0.3.4 (2019-02-13) ++++++++++++++++++++ + +- Fix an f-string tokenizer error + +0.3.3 (2019-02-06) ++++++++++++++++++++ + +- Fix async errors in the diff parser +- A fix in iter_errors +- This is a very small bugfix release + +0.3.2 (2019-01-24) ++++++++++++++++++++ + +- 20+ bugfixes in the diff parser and 3 in the tokenizer +- A fuzzer for the diff parser, to give confidence that the diff parser is in a + good shape. +- Some bugfixes for f-string + +0.3.1 (2018-07-09) ++++++++++++++++++++ + +- Bugfixes in the diff parser and keyword-only arguments + +0.3.0 (2018-06-30) ++++++++++++++++++++ + +- Rewrote the pgen2 parser generator. + +0.2.1 (2018-05-21) ++++++++++++++++++++ + +- A bugfix for the diff parser. +- Grammar files can now be loaded from a specific path. 
+ +0.2.0 (2018-04-15) ++++++++++++++++++++ + +- f-strings are now parsed as a part of the normal Python grammar. This makes + it way easier to deal with them. + +0.1.1 (2017-11-05) ++++++++++++++++++++ + +- Fixed a few bugs in the caching layer +- Added support for Python 3.7 + +0.1.0 (2017-09-04) ++++++++++++++++++++ + +- Pulling the library out of Jedi. Some APIs will definitely change. diff --git a/bundle/jedi-vim/pythonx/parso/CONTRIBUTING.md b/bundle/jedi-vim/pythonx/parso/CONTRIBUTING.md new file mode 100644 index 000000000..60aa841aa --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/CONTRIBUTING.md @@ -0,0 +1,8 @@ +We <3 Pull Requests! Three core things: + + 1. If you are adding functionality or fixing a bug, please add a test! + 2. Add your name to AUTHORS.txt + 3. Use the PEP8 style guide. + + If you want to add methods to the parser tree, we will need to discuss this in + an issue first. diff --git a/bundle/jedi-vim/pythonx/parso/LICENSE.txt b/bundle/jedi-vim/pythonx/parso/LICENSE.txt new file mode 100644 index 000000000..08c41db01 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/LICENSE.txt @@ -0,0 +1,86 @@ +All contributions towards parso are MIT licensed. + +Some Python files have been taken from the standard library and are therefore +PSF licensed. Modifications on these files are dual licensed (both MIT and +PSF). These files are: + +- parso/pgen2/* +- parso/tokenize.py +- parso/token.py +- test/test_pgen2.py + +Also some test files under test/normalizer_issue_files have been copied from +https://github.com/PyCQA/pycodestyle (Expat License == MIT License). 
+ +------------------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) <2013-2017> + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +------------------------------------------------------------------------------- + +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +-------------------------------------------- + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. 
Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved" +are retained in Python alone or in any derivative version prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. 
This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. diff --git a/bundle/jedi-vim/pythonx/parso/MANIFEST.in b/bundle/jedi-vim/pythonx/parso/MANIFEST.in new file mode 100644 index 000000000..e54f3ea59 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/MANIFEST.in @@ -0,0 +1,11 @@ +include README.rst +include CHANGELOG.rst +include LICENSE.txt +include AUTHORS.txt +include .coveragerc +include conftest.py +include pytest.ini +include parso/python/grammar*.txt +recursive-include test * +recursive-include docs * +recursive-exclude * *.pyc diff --git a/bundle/jedi-vim/pythonx/parso/README.rst b/bundle/jedi-vim/pythonx/parso/README.rst new file mode 100644 index 000000000..98abc7360 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/README.rst @@ -0,0 +1,95 @@ +################################################################### +parso - A Python Parser +################################################################### + + +.. image:: https://github.com/davidhalter/parso/workflows/Build/badge.svg?branch=master + :target: https://github.com/davidhalter/parso/actions + :alt: GitHub Actions build status + +.. image:: https://coveralls.io/repos/github/davidhalter/parso/badge.svg?branch=master + :target: https://coveralls.io/github/davidhalter/parso?branch=master + :alt: Coverage Status + +.. image:: https://pepy.tech/badge/parso + :target: https://pepy.tech/project/parso + :alt: PyPI Downloads + +.. image:: https://raw.githubusercontent.com/davidhalter/parso/master/docs/_static/logo_characters.png + +Parso is a Python parser that supports error recovery and round-trip parsing +for different Python versions (in multiple Python versions). Parso is also able +to list multiple syntax errors in your python file. 
+ +Parso has been battle-tested by jedi_. It was pulled out of jedi to be useful +for other projects as well. + +Parso consists of a small API to parse Python and analyse the syntax tree. + +A simple example: + +.. code-block:: python + + >>> import parso + >>> module = parso.parse('hello + 1', version="3.9") + >>> expr = module.children[0] + >>> expr + PythonNode(arith_expr, [, , ]) + >>> print(expr.get_code()) + hello + 1 + >>> name = expr.children[0] + >>> name + + >>> name.end_pos + (1, 5) + >>> expr.end_pos + (1, 9) + +To list multiple issues: + +.. code-block:: python + + >>> grammar = parso.load_grammar() + >>> module = grammar.parse('foo +\nbar\ncontinue') + >>> error1, error2 = grammar.iter_errors(module) + >>> error1.message + 'SyntaxError: invalid syntax' + >>> error2.message + "SyntaxError: 'continue' not properly in loop" + +Resources +========= + +- `Testing `_ +- `PyPI `_ +- `Docs `_ +- Uses `semantic versioning `_ + +Installation +============ + + pip install parso + +Future +====== + +- There will be better support for refactoring and comments. Stay tuned. +- There's a WIP PEP8 validator. It's however not in a good shape, yet. + +Known Issues +============ + +- `async`/`await` are already used as keywords in Python3.6. +- `from __future__ import print_function` is not ignored. + + +Acknowledgements +================ + +- Guido van Rossum (@gvanrossum) for creating the parser generator pgen2 + (originally used in lib2to3). +- `Salome Schneider `_ + for the extremely awesome parso logo. + + +.. 
_jedi: https://github.com/davidhalter/jedi diff --git a/bundle/jedi-vim/pythonx/parso/conftest.py b/bundle/jedi-vim/pythonx/parso/conftest.py new file mode 100644 index 000000000..35a184647 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/conftest.py @@ -0,0 +1,147 @@ +import re +import tempfile +import shutil +import logging +import os +from pathlib import Path + +import pytest + +import parso +from parso import cache +from parso.utils import parse_version_string + +collect_ignore = ["setup.py"] + +_SUPPORTED_VERSIONS = '3.6', '3.7', '3.8', '3.9', '3.10' + + +@pytest.fixture(scope='session') +def clean_parso_cache(): + """ + Set the default cache directory to a temporary directory during tests. + + Note that you can't use built-in `tmpdir` and `monkeypatch` + fixture here because their scope is 'function', which is not used + in 'session' scope fixture. + + This fixture is activated in ../pytest.ini. + """ + old = cache._default_cache_path + tmp = tempfile.mkdtemp(prefix='parso-test-') + cache._default_cache_path = Path(tmp) + yield + cache._default_cache_path = old + shutil.rmtree(tmp) + + +def pytest_addoption(parser): + parser.addoption("--logging", "-L", action='store_true', + help="Enables the logging output.") + + +def pytest_generate_tests(metafunc): + if 'normalizer_issue_case' in metafunc.fixturenames: + base_dir = os.path.join(os.path.dirname(__file__), 'test', 'normalizer_issue_files') + + cases = list(colllect_normalizer_tests(base_dir)) + metafunc.parametrize( + 'normalizer_issue_case', + cases, + ids=[c.name for c in cases] + ) + elif 'each_version' in metafunc.fixturenames: + metafunc.parametrize('each_version', _SUPPORTED_VERSIONS) + elif 'version_ge_py38' in metafunc.fixturenames: + ge38 = set(_SUPPORTED_VERSIONS) - {'3.6', '3.7'} + metafunc.parametrize('version_ge_py38', sorted(ge38)) + + +class NormalizerIssueCase: + """ + Static Analysis cases lie in the static_analysis folder. + The tests also start with `#!`, like the goto_definition tests. 
+ """ + def __init__(self, path): + self.path = path + self.name = os.path.basename(path) + match = re.search(r'python([\d.]+)\.py', self.name) + self.python_version = match and match.group(1) + + +def colllect_normalizer_tests(base_dir): + for f_name in os.listdir(base_dir): + if f_name.endswith(".py"): + path = os.path.join(base_dir, f_name) + yield NormalizerIssueCase(path) + + +def pytest_configure(config): + if config.option.logging: + root = logging.getLogger() + root.setLevel(logging.DEBUG) + + #ch = logging.StreamHandler(sys.stdout) + #ch.setLevel(logging.DEBUG) + #formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') + #ch.setFormatter(formatter) + + #root.addHandler(ch) + + +class Checker: + def __init__(self, version, is_passing): + self.version = version + self._is_passing = is_passing + self.grammar = parso.load_grammar(version=self.version) + + def parse(self, code): + if self._is_passing: + return parso.parse(code, version=self.version, error_recovery=False) + else: + self._invalid_syntax(code) + + def _invalid_syntax(self, code): + with pytest.raises(parso.ParserSyntaxError): + module = parso.parse(code, version=self.version, error_recovery=False) + # For debugging + print(module.children) + + def get_error(self, code): + errors = list(self.grammar.iter_errors(self.grammar.parse(code))) + assert bool(errors) != self._is_passing + if errors: + return errors[0] + + def get_error_message(self, code): + error = self.get_error(code) + if error is None: + return + return error.message + + def assert_no_error_in_passing(self, code): + if self._is_passing: + module = self.grammar.parse(code) + assert not list(self.grammar.iter_errors(module)) + + +@pytest.fixture +def works_not_in_py(each_version): + return Checker(each_version, False) + + +@pytest.fixture +def works_in_py(each_version): + return Checker(each_version, True) + + +@pytest.fixture +def works_ge_py38(each_version): + version_info = 
parse_version_string(each_version) + return Checker(each_version, version_info >= (3, 8)) + + +@pytest.fixture +def works_ge_py39(each_version): + version_info = parse_version_string(each_version) + return Checker(each_version, version_info >= (3, 9)) diff --git a/bundle/jedi-vim/pythonx/parso/deploy-master.sh b/bundle/jedi-vim/pythonx/parso/deploy-master.sh new file mode 100644 index 000000000..79d25f85e --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/deploy-master.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash +# The script creates a separate folder in build/ and creates tags there, pushes +# them and then uploads the package to PyPI. + +set -eu -o pipefail + +BASE_DIR=$(dirname $(readlink -f "$0")) +cd $BASE_DIR + +git fetch --tags + +PROJECT_NAME=parso +BRANCH=master +BUILD_FOLDER=build + +[ -d $BUILD_FOLDER ] || mkdir $BUILD_FOLDER +# Remove the previous deployment first. +# Checkout the right branch +cd $BUILD_FOLDER +rm -rf $PROJECT_NAME +git clone .. $PROJECT_NAME +cd $PROJECT_NAME +git checkout $BRANCH + +# Test first. +pytest + +# Create tag +tag=v$(python3 -c "import $PROJECT_NAME; print($PROJECT_NAME.__version__)") + +master_ref=$(git show-ref -s heads/$BRANCH) +tag_ref=$(git show-ref -s $tag || true) +if [[ $tag_ref ]]; then + if [[ $tag_ref != $master_ref ]]; then + echo 'Cannot tag something that has already been tagged with another commit.' + exit 1 + fi +else + git tag -a $tag + git push --tags +fi + +# Package and upload to PyPI +#rm -rf dist/ - Not needed anymore, because the folder is never reused. +echo `pwd` +python3 setup.py sdist bdist_wheel +# Maybe do a pip install twine before. +twine upload dist/* + +cd $BASE_DIR +# The tags have been pushed to this repo. Push the tags to github, now. 
+git push --tags diff --git a/bundle/jedi-vim/pythonx/parso/docs/Makefile b/bundle/jedi-vim/pythonx/parso/docs/Makefile new file mode 100644 index 000000000..1348ee363 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/docs/Makefile @@ -0,0 +1,153 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = _build + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . + +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + 
-rm -rf $(BUILDDIR)/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/parso.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/parso.qhc" + +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." + @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/parso" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/parso" + @echo "# devhelp" + +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." + +man: + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." 
diff --git a/bundle/jedi-vim/pythonx/parso/docs/README.md b/bundle/jedi-vim/pythonx/parso/docs/README.md new file mode 100644 index 000000000..d931ca390 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/docs/README.md @@ -0,0 +1,6 @@ +Installation +------------ + +Install sphinx:: + + sudo pip install sphinx diff --git a/bundle/jedi-vim/pythonx/parso/docs/_static/logo.png b/bundle/jedi-vim/pythonx/parso/docs/_static/logo.png new file mode 100644 index 000000000..5c3e47a69 Binary files /dev/null and b/bundle/jedi-vim/pythonx/parso/docs/_static/logo.png differ diff --git a/bundle/jedi-vim/pythonx/parso/docs/_static/logo_characters.png b/bundle/jedi-vim/pythonx/parso/docs/_static/logo_characters.png new file mode 100644 index 000000000..c2387ec54 Binary files /dev/null and b/bundle/jedi-vim/pythonx/parso/docs/_static/logo_characters.png differ diff --git a/bundle/jedi-vim/pythonx/parso/docs/_templates/ghbuttons.html b/bundle/jedi-vim/pythonx/parso/docs/_templates/ghbuttons.html new file mode 100644 index 000000000..75d98a2b0 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/docs/_templates/ghbuttons.html @@ -0,0 +1,4 @@ +

Github

+ +

diff --git a/bundle/jedi-vim/pythonx/parso/docs/_templates/sidebarlogo.html b/bundle/jedi-vim/pythonx/parso/docs/_templates/sidebarlogo.html new file mode 100644 index 000000000..d9243c4eb --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/docs/_templates/sidebarlogo.html @@ -0,0 +1,3 @@ + diff --git a/bundle/jedi-vim/pythonx/parso/docs/_themes/flask/LICENSE b/bundle/jedi-vim/pythonx/parso/docs/_themes/flask/LICENSE new file mode 100644 index 000000000..8daab7ee6 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/docs/_themes/flask/LICENSE @@ -0,0 +1,37 @@ +Copyright (c) 2010 by Armin Ronacher. + +Some rights reserved. + +Redistribution and use in source and binary forms of the theme, with or +without modification, are permitted provided that the following conditions +are met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + +* The names of the contributors may not be used to endorse or + promote products derived from this software without specific + prior written permission. + +We kindly ask you to only use these themes in an unmodified manner just +for Flask and Flask-related products, not for unrelated projects. If you +like the visual style and want to use it for your own projects, please +consider making some larger changes to the themes (such as changing +font faces, sizes, colors or margins). + +THIS THEME IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS THEME, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. diff --git a/bundle/jedi-vim/pythonx/parso/docs/_themes/flask/layout.html b/bundle/jedi-vim/pythonx/parso/docs/_themes/flask/layout.html new file mode 100644 index 000000000..bcd9ddeba --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/docs/_themes/flask/layout.html @@ -0,0 +1,27 @@ +{%- extends "basic/layout.html" %} +{%- block extrahead %} + {{ super() }} + {% if theme_touch_icon %} + + {% endif %} + + + Fork me + +{% endblock %} +{%- block relbar2 %}{% endblock %} +{% block header %} + {{ super() }} + {% if pagename == 'index' %} +
+ {% endif %} +{% endblock %} +{%- block footer %} + + {% if pagename == 'index' %} +
+ {% endif %} +{%- endblock %} diff --git a/bundle/jedi-vim/pythonx/parso/docs/_themes/flask/relations.html b/bundle/jedi-vim/pythonx/parso/docs/_themes/flask/relations.html new file mode 100644 index 000000000..3bbcde85b --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/docs/_themes/flask/relations.html @@ -0,0 +1,19 @@ +

Related Topics

+ diff --git a/bundle/jedi-vim/pythonx/parso/docs/_themes/flask/static/flasky.css_t b/bundle/jedi-vim/pythonx/parso/docs/_themes/flask/static/flasky.css_t new file mode 100644 index 000000000..79ab47871 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/docs/_themes/flask/static/flasky.css_t @@ -0,0 +1,394 @@ +/* + * flasky.css_t + * ~~~~~~~~~~~~ + * + * :copyright: Copyright 2010 by Armin Ronacher. + * :license: Flask Design License, see LICENSE for details. + */ + +{% set page_width = '940px' %} +{% set sidebar_width = '220px' %} + +@import url("basic.css"); + +/* -- page layout ----------------------------------------------------------- */ + +body { + font-family: 'Georgia', serif; + font-size: 17px; + background-color: white; + color: #000; + margin: 0; + padding: 0; +} + +div.document { + width: {{ page_width }}; + margin: 30px auto 0 auto; +} + +div.documentwrapper { + float: left; + width: 100%; +} + +div.bodywrapper { + margin: 0 0 0 {{ sidebar_width }}; +} + +div.sphinxsidebar { + width: {{ sidebar_width }}; +} + +hr { + border: 1px solid #B1B4B6; +} + +div.body { + background-color: #ffffff; + color: #3E4349; + padding: 0 30px 0 30px; +} + +img.floatingflask { + padding: 0 0 10px 10px; + float: right; +} + +div.footer { + width: {{ page_width }}; + margin: 20px auto 30px auto; + font-size: 14px; + color: #888; + text-align: right; +} + +div.footer a { + color: #888; +} + +div.related { + display: none; +} + +div.sphinxsidebar a { + color: #444; + text-decoration: none; + border-bottom: 1px dotted #999; +} + +div.sphinxsidebar a:hover { + border-bottom: 1px solid #999; +} + +div.sphinxsidebar { + font-size: 14px; + line-height: 1.5; +} + +div.sphinxsidebarwrapper { + padding: 18px 10px; +} + +div.sphinxsidebarwrapper p.logo { + padding: 0 0 20px 0; + margin: 0; + text-align: center; +} + +div.sphinxsidebar h3, +div.sphinxsidebar h4 { + font-family: 'Garamond', 'Georgia', serif; + color: #444; + font-size: 24px; + font-weight: normal; + margin: 0 0 5px 0; + 
padding: 0; +} + +div.sphinxsidebar h4 { + font-size: 20px; +} + +div.sphinxsidebar h3 a { + color: #444; +} + +div.sphinxsidebar p.logo a, +div.sphinxsidebar h3 a, +div.sphinxsidebar p.logo a:hover, +div.sphinxsidebar h3 a:hover { + border: none; +} + +div.sphinxsidebar p { + color: #555; + margin: 10px 0; +} + +div.sphinxsidebar ul { + margin: 10px 0; + padding: 0; + color: #000; +} + +div.sphinxsidebar input { + border: 1px solid #ccc; + font-family: 'Georgia', serif; + font-size: 1em; +} + +/* -- body styles ----------------------------------------------------------- */ + +a { + color: #004B6B; + text-decoration: underline; +} + +a:hover { + color: #6D4100; + text-decoration: underline; +} + +div.body h1, +div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + font-family: 'Garamond', 'Georgia', serif; + font-weight: normal; + margin: 30px 0px 10px 0px; + padding: 0; +} + +{% if theme_index_logo %} +div.indexwrapper h1 { + text-indent: -999999px; + background: url({{ theme_index_logo }}) no-repeat center center; + height: {{ theme_index_logo_height }}; +} +{% endif %} + +div.body h1 { margin-top: 0; padding-top: 0; font-size: 240%; } +div.body h2 { font-size: 180%; } +div.body h3 { font-size: 150%; } +div.body h4 { font-size: 130%; } +div.body h5 { font-size: 100%; } +div.body h6 { font-size: 100%; } + +a.headerlink { + color: #ddd; + padding: 0 4px; + text-decoration: none; +} + +a.headerlink:hover { + color: #444; +} + +div.body p, div.body dd, div.body li { + line-height: 1.4em; +} + +div.admonition { + background: #fafafa; + margin: 20px -30px; + padding: 10px 30px; + border-top: 1px solid #ccc; + border-bottom: 1px solid #ccc; +} + +div.admonition tt.xref, div.admonition a tt { + border-bottom: 1px solid #fafafa; +} + +dd div.admonition { + margin-left: -60px; + padding-left: 60px; +} + +div.admonition p.admonition-title { + font-family: 'Garamond', 'Georgia', serif; + font-weight: normal; + font-size: 24px; + margin: 0 0 10px 0; + 
padding: 0; + line-height: 1; +} + +div.admonition p.last { + margin-bottom: 0; +} + +div.highlight { + background-color: white; +} + +dt:target, .highlight { + background: #FAF3E8; +} + +div.note { + background-color: #eee; + border: 1px solid #ccc; +} + +div.seealso { + background-color: #ffc; + border: 1px solid #ff6; +} + +div.topic { + background-color: #eee; +} + +p.admonition-title { + display: inline; +} + +p.admonition-title:after { + content: ":"; +} + +pre, tt { + font-family: 'Consolas', 'Menlo', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; + font-size: 0.9em; +} + +img.screenshot { +} + +tt.descname, tt.descclassname { + font-size: 0.95em; +} + +tt.descname { + padding-right: 0.08em; +} + +img.screenshot { + -moz-box-shadow: 2px 2px 4px #eee; + -webkit-box-shadow: 2px 2px 4px #eee; + box-shadow: 2px 2px 4px #eee; +} + +table.docutils { + border: 1px solid #888; + -moz-box-shadow: 2px 2px 4px #eee; + -webkit-box-shadow: 2px 2px 4px #eee; + box-shadow: 2px 2px 4px #eee; +} + +table.docutils td, table.docutils th { + border: 1px solid #888; + padding: 0.25em 0.7em; +} + +table.field-list, table.footnote { + border: none; + -moz-box-shadow: none; + -webkit-box-shadow: none; + box-shadow: none; +} + +table.footnote { + margin: 15px 0; + width: 100%; + border: 1px solid #eee; + background: #fdfdfd; + font-size: 0.9em; +} + +table.footnote + table.footnote { + margin-top: -15px; + border-top: none; +} + +table.field-list th { + padding: 0 0.8em 0 0; +} + +table.field-list td { + padding: 0; +} + +table.footnote td.label { + width: 0px; + padding: 0.3em 0 0.3em 0.5em; +} + +table.footnote td { + padding: 0.3em 0.5em; +} + +dl { + margin: 0; + padding: 0; +} + +dl dd { + margin-left: 30px; +} + +blockquote { + margin: 0 0 0 30px; + padding: 0; +} + +ul, ol { + margin: 10px 0 10px 30px; + padding: 0; +} + +pre { + background: #eee; + padding: 7px 30px; + margin: 15px -30px; + line-height: 1.3em; +} + +dl pre, blockquote pre, li pre { + 
margin-left: -60px; + padding-left: 60px; +} + +dl dl pre { + margin-left: -90px; + padding-left: 90px; +} + +tt { + background-color: #ecf0f3; + color: #222; + /* padding: 1px 2px; */ +} + +tt.xref, a tt { + background-color: #FBFBFB; + border-bottom: 1px solid white; +} + +a.reference { + text-decoration: none; + border-bottom: 1px dotted #004B6B; +} + +a.reference:hover { + border-bottom: 1px solid #6D4100; +} + +a.footnote-reference { + text-decoration: none; + font-size: 0.7em; + vertical-align: top; + border-bottom: 1px dotted #004B6B; +} + +a.footnote-reference:hover { + border-bottom: 1px solid #6D4100; +} + +a:hover tt { + background: #EEE; +} diff --git a/bundle/jedi-vim/pythonx/parso/docs/_themes/flask/static/small_flask.css b/bundle/jedi-vim/pythonx/parso/docs/_themes/flask/static/small_flask.css new file mode 100644 index 000000000..1c6df309e --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/docs/_themes/flask/static/small_flask.css @@ -0,0 +1,70 @@ +/* + * small_flask.css_t + * ~~~~~~~~~~~~~~~~~ + * + * :copyright: Copyright 2010 by Armin Ronacher. + * :license: Flask Design License, see LICENSE for details. 
+ */ + +body { + margin: 0; + padding: 20px 30px; +} + +div.documentwrapper { + float: none; + background: white; +} + +div.sphinxsidebar { + display: block; + float: none; + width: 102.5%; + margin: 50px -30px -20px -30px; + padding: 10px 20px; + background: #333; + color: white; +} + +div.sphinxsidebar h3, div.sphinxsidebar h4, div.sphinxsidebar p, +div.sphinxsidebar h3 a { + color: white; +} + +div.sphinxsidebar a { + color: #aaa; +} + +div.sphinxsidebar p.logo { + display: none; +} + +div.document { + width: 100%; + margin: 0; +} + +div.related { + display: block; + margin: 0; + padding: 10px 0 20px 0; +} + +div.related ul, +div.related ul li { + margin: 0; + padding: 0; +} + +div.footer { + display: none; +} + +div.bodywrapper { + margin: 0; +} + +div.body { + min-height: 0; + padding: 0; +} diff --git a/bundle/jedi-vim/pythonx/parso/docs/_themes/flask/theme.conf b/bundle/jedi-vim/pythonx/parso/docs/_themes/flask/theme.conf new file mode 100644 index 000000000..1d5657f2f --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/docs/_themes/flask/theme.conf @@ -0,0 +1,9 @@ +[theme] +inherit = basic +stylesheet = flasky.css +pygments_style = flask_theme_support.FlaskyStyle + +[options] +index_logo = +index_logo_height = 120px +touch_icon = diff --git a/bundle/jedi-vim/pythonx/parso/docs/_themes/flask_theme_support.py b/bundle/jedi-vim/pythonx/parso/docs/_themes/flask_theme_support.py new file mode 100644 index 000000000..d3e33c060 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/docs/_themes/flask_theme_support.py @@ -0,0 +1,125 @@ +""" +Copyright (c) 2010 by Armin Ronacher. + +Some rights reserved. + +Redistribution and use in source and binary forms of the theme, with or +without modification, are permitted provided that the following conditions +are met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + +* The names of the contributors may not be used to endorse or + promote products derived from this software without specific + prior written permission. + +We kindly ask you to only use these themes in an unmodified manner just +for Flask and Flask-related products, not for unrelated projects. If you +like the visual style and want to use it for your own projects, please +consider making some larger changes to the themes (such as changing +font faces, sizes, colors or margins). + +THIS THEME IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS THEME, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. +""" +# flasky extensions. 
flasky pygments style based on tango style +from pygments.style import Style +from pygments.token import Keyword, Name, Comment, String, Error, \ + Number, Operator, Generic, Whitespace, Punctuation, Other, Literal + + +class FlaskyStyle(Style): + background_color = "#f8f8f8" + default_style = "" + + styles = { + # No corresponding class for the following: + #Text: "", # class: '' + Whitespace: "underline #f8f8f8", # class: 'w' + Error: "#a40000 border:#ef2929", # class: 'err' + Other: "#000000", # class 'x' + + Comment: "italic #8f5902", # class: 'c' + Comment.Preproc: "noitalic", # class: 'cp' + + Keyword: "bold #004461", # class: 'k' + Keyword.Constant: "bold #004461", # class: 'kc' + Keyword.Declaration: "bold #004461", # class: 'kd' + Keyword.Namespace: "bold #004461", # class: 'kn' + Keyword.Pseudo: "bold #004461", # class: 'kp' + Keyword.Reserved: "bold #004461", # class: 'kr' + Keyword.Type: "bold #004461", # class: 'kt' + + Operator: "#582800", # class: 'o' + Operator.Word: "bold #004461", # class: 'ow' - like keywords + + Punctuation: "bold #000000", # class: 'p' + + # because special names such as Name.Class, Name.Function, etc. + # are not recognized as such later in the parsing, we choose them + # to look the same as ordinary variables. 
+ Name: "#000000", # class: 'n' + Name.Attribute: "#c4a000", # class: 'na' - to be revised + Name.Builtin: "#004461", # class: 'nb' + Name.Builtin.Pseudo: "#3465a4", # class: 'bp' + Name.Class: "#000000", # class: 'nc' - to be revised + Name.Constant: "#000000", # class: 'no' - to be revised + Name.Decorator: "#888", # class: 'nd' - to be revised + Name.Entity: "#ce5c00", # class: 'ni' + Name.Exception: "bold #cc0000", # class: 'ne' + Name.Function: "#000000", # class: 'nf' + Name.Property: "#000000", # class: 'py' + Name.Label: "#f57900", # class: 'nl' + Name.Namespace: "#000000", # class: 'nn' - to be revised + Name.Other: "#000000", # class: 'nx' + Name.Tag: "bold #004461", # class: 'nt' - like a keyword + Name.Variable: "#000000", # class: 'nv' - to be revised + Name.Variable.Class: "#000000", # class: 'vc' - to be revised + Name.Variable.Global: "#000000", # class: 'vg' - to be revised + Name.Variable.Instance: "#000000", # class: 'vi' - to be revised + + Number: "#990000", # class: 'm' + + Literal: "#000000", # class: 'l' + Literal.Date: "#000000", # class: 'ld' + + String: "#4e9a06", # class: 's' + String.Backtick: "#4e9a06", # class: 'sb' + String.Char: "#4e9a06", # class: 'sc' + String.Doc: "italic #8f5902", # class: 'sd' - like a comment + String.Double: "#4e9a06", # class: 's2' + String.Escape: "#4e9a06", # class: 'se' + String.Heredoc: "#4e9a06", # class: 'sh' + String.Interpol: "#4e9a06", # class: 'si' + String.Other: "#4e9a06", # class: 'sx' + String.Regex: "#4e9a06", # class: 'sr' + String.Single: "#4e9a06", # class: 's1' + String.Symbol: "#4e9a06", # class: 'ss' + + Generic: "#000000", # class: 'g' + Generic.Deleted: "#a40000", # class: 'gd' + Generic.Emph: "italic #000000", # class: 'ge' + Generic.Error: "#ef2929", # class: 'gr' + Generic.Heading: "bold #000080", # class: 'gh' + Generic.Inserted: "#00A000", # class: 'gi' + Generic.Output: "#888", # class: 'go' + Generic.Prompt: "#745334", # class: 'gp' + Generic.Strong: "bold #000000", # class: 
'gs' + Generic.Subheading: "bold #800080", # class: 'gu' + Generic.Traceback: "bold #a40000", # class: 'gt' + } diff --git a/bundle/jedi-vim/pythonx/parso/docs/conf.py b/bundle/jedi-vim/pythonx/parso/docs/conf.py new file mode 100644 index 000000000..b11d4ce3b --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/docs/conf.py @@ -0,0 +1,290 @@ +# -*- coding: utf-8 -*- +# +# parso documentation build configuration file, created by +# sphinx-quickstart on Wed Dec 26 00:11:34 2012. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys +import os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.abspath('..')) +sys.path.append(os.path.abspath('_themes')) + +# -- General configuration ----------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.todo', + 'sphinx.ext.intersphinx', 'sphinx.ext.inheritance_diagram'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +source_encoding = 'utf-8' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. 
+project = 'parso' +copyright = 'parso contributors' + +import parso +from parso.utils import version_info + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '.'.join(str(x) for x in version_info()[:2]) +# The full version, including alpha/beta/rc tags. +release = parso.__version__ + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = [] + +# The reST default role (used for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'flask' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. 
For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +html_theme_path = ['_themes'] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +html_sidebars = { + '**': [ + 'sidebarlogo.html', + 'localtoc.html', + #'relations.html', + 'ghbuttons.html', + #'sourcelink.html', + 'searchbox.html' + ] +} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. 
+#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'parsodoc' + +#html_style = 'default.css' # Force usage of default template on RTD + + +# -- Options for LaTeX output -------------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + #'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + #'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + #'preamble': '', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). +latex_documents = [ + ('index', 'parso.tex', 'parso documentation', + 'parso contributors', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + + +# -- Options for manual page output -------------------------------------------- + +# One entry per manual page. 
List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ('index', 'parso', 'parso Documentation', + ['parso contributors'], 1) +] + +# If true, show URL addresses after external links. +#man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------------ + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ('index', 'parso', 'parso documentation', + 'parso contributors', 'parso', 'Awesome Python autocompletion library.', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +#texinfo_show_urls = 'footnote' + +# -- Options for todo module --------------------------------------------------- + +todo_include_todos = False + +# -- Options for autodoc module ------------------------------------------------ + +autoclass_content = 'both' +autodoc_member_order = 'bysource' +autodoc_default_flags = [] +#autodoc_default_flags = ['members', 'undoc-members'] + + +# -- Options for intersphinx module -------------------------------------------- + +intersphinx_mapping = { + 'http://docs.python.org/': ('https://docs.python.org/3', None), +} + + +def skip_deprecated(app, what, name, obj, skip, options): + """ + All attributes containing a deprecated note shouldn't be documented + anymore. This makes it even clearer that they are not supported anymore. + """ + doc = obj.__doc__ + return skip or doc and '.. 
deprecated::' in doc + + +def setup(app): + app.connect('autodoc-skip-member', skip_deprecated) diff --git a/bundle/jedi-vim/pythonx/parso/docs/docs/development.rst b/bundle/jedi-vim/pythonx/parso/docs/docs/development.rst new file mode 100644 index 000000000..e14a1e4b7 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/docs/docs/development.rst @@ -0,0 +1,38 @@ +.. include:: ../global.rst + +Development +=========== + +If you want to contribute anything to |parso|, just open an issue or pull +request to discuss it. We welcome changes! Please check the ``CONTRIBUTING.md`` +file in the repository, first. + + +Deprecations Process +-------------------- + +The deprecation process is as follows: + +1. A deprecation is announced in the next major/minor release. +2. We wait either at least a year & at least two minor releases until we remove + the deprecated functionality. + + +Testing +------- + +The test suite depends on ``pytest``:: + + pip install pytest + +To run the tests use the following:: + + pytest + +If you want to test only a specific Python version (e.g. Python 3.9), it's as +easy as:: + + python3.9 -m pytest + +Tests are also run automatically on `GitHub Actions +`_. diff --git a/bundle/jedi-vim/pythonx/parso/docs/docs/installation.rst b/bundle/jedi-vim/pythonx/parso/docs/docs/installation.rst new file mode 100644 index 000000000..a2de2588a --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/docs/docs/installation.rst @@ -0,0 +1,32 @@ +.. 
include:: ../global.rst + +Installation and Configuration +============================== + +The preferred way (pip) +----------------------- + +On any system you can install |parso| directly from the Python package index +using pip:: + + sudo pip install parso + + +From git +-------- +If you want to install the current development version (master branch):: + + sudo pip install -e git://github.com/davidhalter/parso.git#egg=parso + + +Manual installation from a downloaded package (not recommended) +--------------------------------------------------------------- + +If you prefer not to use an automated package installer, you can `download +`__ a current copy of +|parso| and install it manually. + +To install it, navigate to the directory containing `setup.py` on your console +and type:: + + sudo python setup.py install diff --git a/bundle/jedi-vim/pythonx/parso/docs/docs/parser-tree.rst b/bundle/jedi-vim/pythonx/parso/docs/docs/parser-tree.rst new file mode 100644 index 000000000..6eb20064a --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/docs/docs/parser-tree.rst @@ -0,0 +1,49 @@ +.. include:: ../global.rst + +.. _parser-tree: + +Parser Tree +=========== + +The parser tree is returned by calling :py:meth:`parso.Grammar.parse`. + +.. note:: Note that parso positions are always 1 based for lines and zero + based for columns. This means the first position in a file is (1, 0). + +Parser Tree Base Classes +------------------------ + +Generally there are two types of classes you will deal with: +:py:class:`parso.tree.Leaf` and :py:class:`parso.tree.BaseNode`. + +.. autoclass:: parso.tree.BaseNode + :show-inheritance: + :members: + +.. autoclass:: parso.tree.Leaf + :show-inheritance: + :members: + +All nodes and leaves have these methods/properties: + +.. autoclass:: parso.tree.NodeOrLeaf + :members: + :undoc-members: + :show-inheritance: + + +Python Parser Tree +------------------ + +.. currentmodule:: parso.python.tree + +.. 
automodule:: parso.python.tree + :members: + :undoc-members: + :show-inheritance: + + +Utility +------- + +.. autofunction:: parso.tree.search_ancestor diff --git a/bundle/jedi-vim/pythonx/parso/docs/docs/usage.rst b/bundle/jedi-vim/pythonx/parso/docs/docs/usage.rst new file mode 100644 index 000000000..072d2e5e2 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/docs/docs/usage.rst @@ -0,0 +1,68 @@ +.. include:: ../global.rst + +Usage +===== + +|parso| works around grammars. You can simply create Python grammars by calling +:py:func:`parso.load_grammar`. Grammars (with a custom tokenizer and custom parser trees) +can also be created by directly instantiating :py:func:`parso.Grammar`. More information +about the resulting objects can be found in the :ref:`parser tree documentation +`. + +The simplest way of using parso is without even loading a grammar +(:py:func:`parso.parse`): + +.. sourcecode:: python + + >>> import parso + >>> parso.parse('foo + bar') + + +Loading a Grammar +----------------- + +Typically if you want to work with one specific Python version, use: + +.. autofunction:: parso.load_grammar + +Grammar methods +--------------- + +You will get back a grammar object that you can use to parse code and find +issues in it: + +.. autoclass:: parso.Grammar + :members: + :undoc-members: + + +Error Retrieval +--------------- + +|parso| is able to find multiple errors in your source code. Iterating through +those errors yields the following instances: + +.. autoclass:: parso.normalizer.Issue + :members: + :undoc-members: + + +Utility +------- + +|parso| also offers some utility functions that can be really useful: + +.. autofunction:: parso.parse +.. autofunction:: parso.split_lines +.. autofunction:: parso.python_bytes_to_unicode + + +Used By +------- + +- jedi_ (which is used by IPython and a lot of editor plugins). +- mutmut_ (mutation tester) + + +.. _jedi: https://github.com/davidhalter/jedi +.. 
_mutmut: https://github.com/boxed/mutmut diff --git a/bundle/jedi-vim/pythonx/parso/docs/global.rst b/bundle/jedi-vim/pythonx/parso/docs/global.rst new file mode 100644 index 000000000..ec50aaf0b --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/docs/global.rst @@ -0,0 +1,4 @@ +:orphan: + +.. |jedi| replace:: *jedi* +.. |parso| replace:: *parso* diff --git a/bundle/jedi-vim/pythonx/parso/docs/index.rst b/bundle/jedi-vim/pythonx/parso/docs/index.rst new file mode 100644 index 000000000..b390db35c --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/docs/index.rst @@ -0,0 +1,31 @@ +.. include global.rst + +parso - A Python Parser +======================= + +Release v\ |release|. (:doc:`Installation `) + +.. automodule:: parso + +.. _toc: + +Docs +---- + +.. toctree:: + :maxdepth: 2 + + docs/installation + docs/usage + docs/parser-tree + docs/development + + +.. _resources: + +Resources +--------- + +- `Source Code on Github `_ +- `GitHub Actions Testing `_ +- `Python Package Index `_ diff --git a/bundle/jedi-vim/pythonx/parso/parso/__init__.py b/bundle/jedi-vim/pythonx/parso/parso/__init__.py new file mode 100644 index 000000000..0cceabedc --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/parso/__init__.py @@ -0,0 +1,58 @@ +r""" +Parso is a Python parser that supports error recovery and round-trip parsing +for different Python versions (in multiple Python versions). Parso is also able +to list multiple syntax errors in your python file. + +Parso has been battle-tested by jedi_. It was pulled out of jedi to be useful +for other projects as well. + +Parso consists of a small API to parse Python and analyse the syntax tree. + +.. 
_jedi: https://github.com/davidhalter/jedi + +A simple example: + +>>> import parso +>>> module = parso.parse('hello + 1', version="3.9") +>>> expr = module.children[0] +>>> expr +PythonNode(arith_expr, [, , ]) +>>> print(expr.get_code()) +hello + 1 +>>> name = expr.children[0] +>>> name + +>>> name.end_pos +(1, 5) +>>> expr.end_pos +(1, 9) + +To list multiple issues: + +>>> grammar = parso.load_grammar() +>>> module = grammar.parse('foo +\nbar\ncontinue') +>>> error1, error2 = grammar.iter_errors(module) +>>> error1.message +'SyntaxError: invalid syntax' +>>> error2.message +"SyntaxError: 'continue' not properly in loop" +""" + +from parso.parser import ParserSyntaxError +from parso.grammar import Grammar, load_grammar +from parso.utils import split_lines, python_bytes_to_unicode + + +__version__ = '0.8.3' + + +def parse(code=None, **kwargs): + """ + A utility function to avoid loading grammars. + Params are documented in :py:meth:`parso.Grammar.parse`. + + :param str version: The version used by :py:func:`parso.load_grammar`. 
+ """ + version = kwargs.pop('version', None) + grammar = load_grammar(version=version) + return grammar.parse(code, **kwargs) diff --git a/bundle/jedi-vim/pythonx/parso/parso/_compatibility.py b/bundle/jedi-vim/pythonx/parso/parso/_compatibility.py new file mode 100644 index 000000000..58b186fc4 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/parso/_compatibility.py @@ -0,0 +1,3 @@ +import platform + +is_pypy = platform.python_implementation() == 'PyPy' diff --git a/bundle/jedi-vim/pythonx/parso/parso/cache.py b/bundle/jedi-vim/pythonx/parso/parso/cache.py new file mode 100644 index 000000000..5592a9fdd --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/parso/cache.py @@ -0,0 +1,275 @@ +import time +import os +import sys +import hashlib +import gc +import shutil +import platform +import logging +import warnings +import pickle +from pathlib import Path +from typing import Dict, Any + +LOG = logging.getLogger(__name__) + +_CACHED_FILE_MINIMUM_SURVIVAL = 60 * 10 # 10 minutes +""" +Cached files should survive at least a few minutes. +""" + +_CACHED_FILE_MAXIMUM_SURVIVAL = 60 * 60 * 24 * 30 +""" +Maximum time for a cached file to survive if it is not +accessed within. +""" + +_CACHED_SIZE_TRIGGER = 600 +""" +This setting limits the amount of cached files. It's basically a way to start +garbage collection. + +The reasoning for this limit being as big as it is, is the following: + +Numpy, Pandas, Matplotlib and Tensorflow together use about 500 files. This +makes Jedi use ~500mb of memory. Since we might want a bit more than those few +libraries, we just increase it a bit. +""" + +_PICKLE_VERSION = 33 +""" +Version number (integer) for file system cache. + +Increment this number when there are any incompatible changes in +the parser tree classes. For example, the following changes +are regarded as incompatible. + +- A class name is changed. +- A class is moved to another module. +- A __slot__ of a class is changed. 
+""" + +_VERSION_TAG = '%s-%s%s-%s' % ( + platform.python_implementation(), + sys.version_info[0], + sys.version_info[1], + _PICKLE_VERSION +) +""" +Short name for distinguish Python implementations and versions. + +It's a bit similar to `sys.implementation.cache_tag`. +See: http://docs.python.org/3/library/sys.html#sys.implementation +""" + + +def _get_default_cache_path(): + if platform.system().lower() == 'windows': + dir_ = Path(os.getenv('LOCALAPPDATA') or '~', 'Parso', 'Parso') + elif platform.system().lower() == 'darwin': + dir_ = Path('~', 'Library', 'Caches', 'Parso') + else: + dir_ = Path(os.getenv('XDG_CACHE_HOME') or '~/.cache', 'parso') + return dir_.expanduser() + + +_default_cache_path = _get_default_cache_path() +""" +The path where the cache is stored. + +On Linux, this defaults to ``~/.cache/parso/``, on OS X to +``~/Library/Caches/Parso/`` and on Windows to ``%LOCALAPPDATA%\\Parso\\Parso\\``. +On Linux, if environment variable ``$XDG_CACHE_HOME`` is set, +``$XDG_CACHE_HOME/parso`` is used instead of the default one. +""" + +_CACHE_CLEAR_THRESHOLD = 60 * 60 * 24 + + +def _get_cache_clear_lock_path(cache_path=None): + """ + The path where the cache lock is stored. + + Cache lock will prevent continous cache clearing and only allow garbage + collection once a day (can be configured in _CACHE_CLEAR_THRESHOLD). + """ + cache_path = cache_path or _default_cache_path + return cache_path.joinpath("PARSO-CACHE-LOCK") + + +parser_cache: Dict[str, Any] = {} + + +class _NodeCacheItem: + def __init__(self, node, lines, change_time=None): + self.node = node + self.lines = lines + if change_time is None: + change_time = time.time() + self.change_time = change_time + self.last_used = change_time + + +def load_module(hashed_grammar, file_io, cache_path=None): + """ + Returns a module or None, if it fails. 
+ """ + p_time = file_io.get_last_modified() + if p_time is None: + return None + + try: + module_cache_item = parser_cache[hashed_grammar][file_io.path] + if p_time <= module_cache_item.change_time: + module_cache_item.last_used = time.time() + return module_cache_item.node + except KeyError: + return _load_from_file_system( + hashed_grammar, + file_io.path, + p_time, + cache_path=cache_path + ) + + +def _load_from_file_system(hashed_grammar, path, p_time, cache_path=None): + cache_path = _get_hashed_path(hashed_grammar, path, cache_path=cache_path) + try: + if p_time > os.path.getmtime(cache_path): + # Cache is outdated + return None + + with open(cache_path, 'rb') as f: + gc.disable() + try: + module_cache_item = pickle.load(f) + finally: + gc.enable() + except FileNotFoundError: + return None + else: + _set_cache_item(hashed_grammar, path, module_cache_item) + LOG.debug('pickle loaded: %s', path) + return module_cache_item.node + + +def _set_cache_item(hashed_grammar, path, module_cache_item): + if sum(len(v) for v in parser_cache.values()) >= _CACHED_SIZE_TRIGGER: + # Garbage collection of old cache files. + # We are basically throwing everything away that hasn't been accessed + # in 10 minutes. 
+ cutoff_time = time.time() - _CACHED_FILE_MINIMUM_SURVIVAL + for key, path_to_item_map in parser_cache.items(): + parser_cache[key] = { + path: node_item + for path, node_item in path_to_item_map.items() + if node_item.last_used > cutoff_time + } + + parser_cache.setdefault(hashed_grammar, {})[path] = module_cache_item + + +def try_to_save_module(hashed_grammar, file_io, module, lines, pickling=True, cache_path=None): + path = file_io.path + try: + p_time = None if path is None else file_io.get_last_modified() + except OSError: + p_time = None + pickling = False + + item = _NodeCacheItem(module, lines, p_time) + _set_cache_item(hashed_grammar, path, item) + if pickling and path is not None: + try: + _save_to_file_system(hashed_grammar, path, item, cache_path=cache_path) + except PermissionError: + # It's not really a big issue if the cache cannot be saved to the + # file system. It's still in RAM in that case. However we should + # still warn the user that this is happening. + warnings.warn( + 'Tried to save a file to %s, but got permission denied.' 
% path, + Warning + ) + else: + _remove_cache_and_update_lock(cache_path=cache_path) + + +def _save_to_file_system(hashed_grammar, path, item, cache_path=None): + with open(_get_hashed_path(hashed_grammar, path, cache_path=cache_path), 'wb') as f: + pickle.dump(item, f, pickle.HIGHEST_PROTOCOL) + + +def clear_cache(cache_path=None): + if cache_path is None: + cache_path = _default_cache_path + shutil.rmtree(cache_path) + parser_cache.clear() + + +def clear_inactive_cache( + cache_path=None, + inactivity_threshold=_CACHED_FILE_MAXIMUM_SURVIVAL, +): + if cache_path is None: + cache_path = _default_cache_path + if not cache_path.exists(): + return False + for dirname in os.listdir(cache_path): + version_path = cache_path.joinpath(dirname) + if not version_path.is_dir(): + continue + for file in os.scandir(version_path): + if file.stat().st_atime + _CACHED_FILE_MAXIMUM_SURVIVAL <= time.time(): + try: + os.remove(file.path) + except OSError: # silently ignore all failures + continue + else: + return True + + +def _touch(path): + try: + os.utime(path, None) + except FileNotFoundError: + try: + file = open(path, 'a') + file.close() + except (OSError, IOError): # TODO Maybe log this? + return False + return True + + +def _remove_cache_and_update_lock(cache_path=None): + lock_path = _get_cache_clear_lock_path(cache_path=cache_path) + try: + clear_lock_time = os.path.getmtime(lock_path) + except FileNotFoundError: + clear_lock_time = None + if ( + clear_lock_time is None # first time + or clear_lock_time + _CACHE_CLEAR_THRESHOLD <= time.time() + ): + if not _touch(lock_path): + # First make sure that as few as possible other cleanup jobs also + # get started. There is still a race condition but it's probably + # not a big problem. 
+ return False + + clear_inactive_cache(cache_path=cache_path) + + +def _get_hashed_path(hashed_grammar, path, cache_path=None): + directory = _get_cache_directory_path(cache_path=cache_path) + + file_hash = hashlib.sha256(str(path).encode("utf-8")).hexdigest() + return os.path.join(directory, '%s-%s.pkl' % (hashed_grammar, file_hash)) + + +def _get_cache_directory_path(cache_path=None): + if cache_path is None: + cache_path = _default_cache_path + directory = cache_path.joinpath(_VERSION_TAG) + if not directory.exists(): + os.makedirs(directory) + return directory diff --git a/bundle/jedi-vim/pythonx/parso/parso/file_io.py b/bundle/jedi-vim/pythonx/parso/parso/file_io.py new file mode 100644 index 000000000..568ce9d48 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/parso/file_io.py @@ -0,0 +1,38 @@ +import os +from pathlib import Path +from typing import Union + + +class FileIO: + def __init__(self, path: Union[os.PathLike, str]): + if isinstance(path, str): + path = Path(path) + self.path = path + + def read(self): # Returns bytes/str + # We would like to read unicode here, but we cannot, because we are not + # sure if it is a valid unicode file. Therefore just read whatever is + # here. + with open(self.path, 'rb') as f: + return f.read() + + def get_last_modified(self): + """ + Returns float - timestamp or None, if path doesn't exist. 
+        """
+        try:
+            return os.path.getmtime(self.path)
+        except FileNotFoundError:
+            return None
+
+    def __repr__(self):
+        return '%s(%s)' % (self.__class__.__name__, self.path)
+
+
+class KnownContentFileIO(FileIO):
+    def __init__(self, path, content):
+        super().__init__(path)
+        self._content = content
+
+    def read(self):
+        return self._content
diff --git a/bundle/jedi-vim/pythonx/parso/parso/grammar.py b/bundle/jedi-vim/pythonx/parso/parso/grammar.py
new file mode 100644
index 000000000..43b7be940
--- /dev/null
+++ b/bundle/jedi-vim/pythonx/parso/parso/grammar.py
@@ -0,0 +1,264 @@
+import hashlib
+import os
+from typing import Generic, TypeVar, Union, Dict, Optional, Any
+from pathlib import Path
+
+from parso._compatibility import is_pypy
+from parso.pgen2 import generate_grammar
+from parso.utils import split_lines, python_bytes_to_unicode, \
+    PythonVersionInfo, parse_version_string
+from parso.python.diff import DiffParser
+from parso.python.tokenize import tokenize_lines, tokenize
+from parso.python.token import PythonTokenTypes
+from parso.cache import parser_cache, load_module, try_to_save_module
+from parso.parser import BaseParser
+from parso.python.parser import Parser as PythonParser
+from parso.python.errors import ErrorFinderConfig
+from parso.python import pep8
+from parso.file_io import FileIO, KnownContentFileIO
+from parso.normalizer import RefactoringNormalizer, NormalizerConfig
+
+_loaded_grammars: Dict[str, 'Grammar'] = {}
+
+_NodeT = TypeVar("_NodeT")
+
+
+class Grammar(Generic[_NodeT]):
+    """
+    :py:func:`parso.load_grammar` returns instances of this class.
+
+    Creating custom non-Python grammars by calling this is not supported, yet.
+
+    :param text: A BNF representation of your grammar.
+ """ + _start_nonterminal: str + _error_normalizer_config: Optional[ErrorFinderConfig] = None + _token_namespace: Any = None + _default_normalizer_config: NormalizerConfig = pep8.PEP8NormalizerConfig() + + def __init__(self, text: str, *, tokenizer, parser=BaseParser, diff_parser=None): + self._pgen_grammar = generate_grammar( + text, + token_namespace=self._get_token_namespace() + ) + self._parser = parser + self._tokenizer = tokenizer + self._diff_parser = diff_parser + self._hashed = hashlib.sha256(text.encode("utf-8")).hexdigest() + + def parse(self, + code: Union[str, bytes] = None, + *, + error_recovery=True, + path: Union[os.PathLike, str] = None, + start_symbol: str = None, + cache=False, + diff_cache=False, + cache_path: Union[os.PathLike, str] = None, + file_io: FileIO = None) -> _NodeT: + """ + If you want to parse a Python file you want to start here, most likely. + + If you need finer grained control over the parsed instance, there will be + other ways to access it. + + :param str code: A unicode or bytes string. When it's not possible to + decode bytes to a string, returns a + :py:class:`UnicodeDecodeError`. + :param bool error_recovery: If enabled, any code will be returned. If + it is invalid, it will be returned as an error node. If disabled, + you will get a ParseError when encountering syntax errors in your + code. + :param str start_symbol: The grammar rule (nonterminal) that you want + to parse. Only allowed to be used when error_recovery is False. + :param str path: The path to the file you want to open. Only needed for caching. + :param bool cache: Keeps a copy of the parser tree in RAM and on disk + if a path is given. Returns the cached trees if the corresponding + files on disk have not changed. Note that this stores pickle files + on your file system (e.g. for Linux in ``~/.cache/parso/``). + :param bool diff_cache: Diffs the cached python module against the new + code and tries to parse only the parts that have changed. 
Returns + the same (changed) module that is found in cache. Using this option + requires you to not do anything anymore with the cached modules + under that path, because the contents of it might change. This + option is still somewhat experimental. If you want stability, + please don't use it. + :param bool cache_path: If given saves the parso cache in this + directory. If not given, defaults to the default cache places on + each platform. + + :return: A subclass of :py:class:`parso.tree.NodeOrLeaf`. Typically a + :py:class:`parso.python.tree.Module`. + """ + if code is None and path is None and file_io is None: + raise TypeError("Please provide either code or a path.") + + if isinstance(path, str): + path = Path(path) + if isinstance(cache_path, str): + cache_path = Path(cache_path) + + if start_symbol is None: + start_symbol = self._start_nonterminal + + if error_recovery and start_symbol != 'file_input': + raise NotImplementedError("This is currently not implemented.") + + if file_io is None: + if code is None: + file_io = FileIO(path) # type: ignore + else: + file_io = KnownContentFileIO(path, code) + + if cache and file_io.path is not None: + module_node = load_module(self._hashed, file_io, cache_path=cache_path) + if module_node is not None: + return module_node # type: ignore + + if code is None: + code = file_io.read() + code = python_bytes_to_unicode(code) + + lines = split_lines(code, keepends=True) + if diff_cache: + if self._diff_parser is None: + raise TypeError("You have to define a diff parser to be able " + "to use this option.") + try: + module_cache_item = parser_cache[self._hashed][file_io.path] + except KeyError: + pass + else: + module_node = module_cache_item.node + old_lines = module_cache_item.lines + if old_lines == lines: + return module_node # type: ignore + + new_node = self._diff_parser( + self._pgen_grammar, self._tokenizer, module_node + ).update( + old_lines=old_lines, + new_lines=lines + ) + try_to_save_module(self._hashed, 
file_io, new_node, lines, + # Never pickle in pypy, it's slow as hell. + pickling=cache and not is_pypy, + cache_path=cache_path) + return new_node # type: ignore + + tokens = self._tokenizer(lines) + + p = self._parser( + self._pgen_grammar, + error_recovery=error_recovery, + start_nonterminal=start_symbol + ) + root_node = p.parse(tokens=tokens) + + if cache or diff_cache: + try_to_save_module(self._hashed, file_io, root_node, lines, + # Never pickle in pypy, it's slow as hell. + pickling=cache and not is_pypy, + cache_path=cache_path) + return root_node # type: ignore + + def _get_token_namespace(self): + ns = self._token_namespace + if ns is None: + raise ValueError("The token namespace should be set.") + return ns + + def iter_errors(self, node): + """ + Given a :py:class:`parso.tree.NodeOrLeaf` returns a generator of + :py:class:`parso.normalizer.Issue` objects. For Python this is + a list of syntax/indentation errors. + """ + if self._error_normalizer_config is None: + raise ValueError("No error normalizer specified for this grammar.") + + return self._get_normalizer_issues(node, self._error_normalizer_config) + + def refactor(self, base_node, node_to_str_map): + return RefactoringNormalizer(node_to_str_map).walk(base_node) + + def _get_normalizer(self, normalizer_config): + if normalizer_config is None: + normalizer_config = self._default_normalizer_config + if normalizer_config is None: + raise ValueError("You need to specify a normalizer, because " + "there's no default normalizer for this tree.") + return normalizer_config.create_normalizer(self) + + def _normalize(self, node, normalizer_config=None): + """ + TODO this is not public, yet. + The returned code will be normalized, e.g. PEP8 for Python. 
+ """ + normalizer = self._get_normalizer(normalizer_config) + return normalizer.walk(node) + + def _get_normalizer_issues(self, node, normalizer_config=None): + normalizer = self._get_normalizer(normalizer_config) + normalizer.walk(node) + return normalizer.issues + + def __repr__(self): + nonterminals = self._pgen_grammar.nonterminal_to_dfas.keys() + txt = ' '.join(list(nonterminals)[:3]) + ' ...' + return '<%s:%s>' % (self.__class__.__name__, txt) + + +class PythonGrammar(Grammar): + _error_normalizer_config = ErrorFinderConfig() + _token_namespace = PythonTokenTypes + _start_nonterminal = 'file_input' + + def __init__(self, version_info: PythonVersionInfo, bnf_text: str): + super().__init__( + bnf_text, + tokenizer=self._tokenize_lines, + parser=PythonParser, + diff_parser=DiffParser + ) + self.version_info = version_info + + def _tokenize_lines(self, lines, **kwargs): + return tokenize_lines(lines, version_info=self.version_info, **kwargs) + + def _tokenize(self, code): + # Used by Jedi. + return tokenize(code, version_info=self.version_info) + + +def load_grammar(*, version: str = None, path: str = None): + """ + Loads a :py:class:`parso.Grammar`. The default version is the current Python + version. + + :param str version: A python version string, e.g. ``version='3.8'``. + :param str path: A path to a grammar file + """ + version_info = parse_version_string(version) + + file = path or os.path.join( + 'python', + 'grammar%s%s.txt' % (version_info.major, version_info.minor) + ) + + global _loaded_grammars + path = os.path.join(os.path.dirname(__file__), file) + try: + return _loaded_grammars[path] + except KeyError: + try: + with open(path) as f: + bnf_text = f.read() + + grammar = PythonGrammar(version_info, bnf_text) + return _loaded_grammars.setdefault(path, grammar) + except FileNotFoundError: + message = "Python version %s.%s is currently not supported." 
% ( + version_info.major, version_info.minor + ) + raise NotImplementedError(message) diff --git a/bundle/jedi-vim/pythonx/parso/parso/normalizer.py b/bundle/jedi-vim/pythonx/parso/parso/normalizer.py new file mode 100644 index 000000000..a95f029eb --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/parso/normalizer.py @@ -0,0 +1,198 @@ +from contextlib import contextmanager +from typing import Dict, List + + +class _NormalizerMeta(type): + def __new__(cls, name, bases, dct): + new_cls = type.__new__(cls, name, bases, dct) + new_cls.rule_value_classes = {} + new_cls.rule_type_classes = {} + return new_cls + + +class Normalizer(metaclass=_NormalizerMeta): + _rule_type_instances: Dict[str, List[type]] = {} + _rule_value_instances: Dict[str, List[type]] = {} + + def __init__(self, grammar, config): + self.grammar = grammar + self._config = config + self.issues = [] + + self._rule_type_instances = self._instantiate_rules('rule_type_classes') + self._rule_value_instances = self._instantiate_rules('rule_value_classes') + + def _instantiate_rules(self, attr): + dct = {} + for base in type(self).mro(): + rules_map = getattr(base, attr, {}) + for type_, rule_classes in rules_map.items(): + new = [rule_cls(self) for rule_cls in rule_classes] + dct.setdefault(type_, []).extend(new) + return dct + + def walk(self, node): + self.initialize(node) + value = self.visit(node) + self.finalize() + return value + + def visit(self, node): + try: + children = node.children + except AttributeError: + return self.visit_leaf(node) + else: + with self.visit_node(node): + return ''.join(self.visit(child) for child in children) + + @contextmanager + def visit_node(self, node): + self._check_type_rules(node) + yield + + def _check_type_rules(self, node): + for rule in self._rule_type_instances.get(node.type, []): + rule.feed_node(node) + + def visit_leaf(self, leaf): + self._check_type_rules(leaf) + + for rule in self._rule_value_instances.get(leaf.value, []): + rule.feed_node(leaf) + + return 
leaf.prefix + leaf.value + + def initialize(self, node): + pass + + def finalize(self): + pass + + def add_issue(self, node, code, message): + issue = Issue(node, code, message) + if issue not in self.issues: + self.issues.append(issue) + return True + + @classmethod + def register_rule(cls, *, value=None, values=(), type=None, types=()): + """ + Use it as a class decorator:: + + normalizer = Normalizer('grammar', 'config') + @normalizer.register_rule(value='foo') + class MyRule(Rule): + error_code = 42 + """ + values = list(values) + types = list(types) + if value is not None: + values.append(value) + if type is not None: + types.append(type) + + if not values and not types: + raise ValueError("You must register at least something.") + + def decorator(rule_cls): + for v in values: + cls.rule_value_classes.setdefault(v, []).append(rule_cls) + for t in types: + cls.rule_type_classes.setdefault(t, []).append(rule_cls) + return rule_cls + + return decorator + + +class NormalizerConfig: + normalizer_class = Normalizer + + def create_normalizer(self, grammar): + if self.normalizer_class is None: + return None + + return self.normalizer_class(grammar, self) + + +class Issue: + def __init__(self, node, code, message): + self.code = code + """ + An integer code that stands for the type of error. + """ + self.message = message + """ + A message (string) for the issue. + """ + self.start_pos = node.start_pos + """ + The start position position of the error as a tuple (line, column). As + always in |parso| the first line is 1 and the first column 0. 
+ """ + self.end_pos = node.end_pos + + def __eq__(self, other): + return self.start_pos == other.start_pos and self.code == other.code + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return hash((self.code, self.start_pos)) + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.code) + + +class Rule: + code: int + message: str + + def __init__(self, normalizer): + self._normalizer = normalizer + + def is_issue(self, node): + raise NotImplementedError() + + def get_node(self, node): + return node + + def _get_message(self, message, node): + if message is None: + message = self.message + if message is None: + raise ValueError("The message on the class is not set.") + return message + + def add_issue(self, node, code=None, message=None): + if code is None: + code = self.code + if code is None: + raise ValueError("The error code on the class is not set.") + + message = self._get_message(message, node) + + self._normalizer.add_issue(node, code, message) + + def feed_node(self, node): + if self.is_issue(node): + issue_node = self.get_node(node) + self.add_issue(issue_node) + + +class RefactoringNormalizer(Normalizer): + def __init__(self, node_to_str_map): + self._node_to_str_map = node_to_str_map + + def visit(self, node): + try: + return self._node_to_str_map[node] + except KeyError: + return super().visit(node) + + def visit_leaf(self, leaf): + try: + return self._node_to_str_map[leaf] + except KeyError: + return super().visit_leaf(leaf) diff --git a/bundle/jedi-vim/pythonx/parso/parso/parser.py b/bundle/jedi-vim/pythonx/parso/parso/parser.py new file mode 100644 index 000000000..37466435d --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/parso/parser.py @@ -0,0 +1,210 @@ +# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +# Modifications: +# Copyright David Halter and Contributors +# Modifications are dual-licensed: MIT and PSF. 
+# 99% of the code is different from pgen2, now.
+
+"""
+The ``Parser`` tries to convert the available Python code in an easy to read
+format, something like an abstract syntax tree. The classes who represent this
+tree, are sitting in the :mod:`parso.tree` module.
+
+The Python module ``tokenize`` is a very important part in the ``Parser``,
+because it splits the code into different words (tokens). Sometimes it looks a
+bit messy. Sorry for that! You might ask now: "Why didn't you use the ``ast``
+module for this?" Well, ``ast`` does a very good job understanding proper Python
+code, but fails to work as soon as there's a single line of broken code.
+
+There's one important optimization that needs to be known: Statements are not
+being parsed completely. ``Statement`` is just a representation of the tokens
+within the statement. This lowers memory usage and cpu time and reduces the
+complexity of the ``Parser`` (there's another parser sitting inside
+``Statement``, which produces ``Array`` and ``Call``).
+"""
+from typing import Dict, Type
+
+from parso import tree
+from parso.pgen2.generator import ReservedString
+
+
+class ParserSyntaxError(Exception):
+    """
+    Contains error information about the parser tree.
+
+    May be raised as an exception.
+    """
+    def __init__(self, message, error_leaf):
+        self.message = message
+        self.error_leaf = error_leaf
+
+
+class InternalParseError(Exception):
+    """
+    Exception to signal the parser is stuck and error recovery didn't help.
+    Basically this shouldn't happen. It's a sign that something is really
+    wrong.
+    """
+
+    def __init__(self, msg, type_, value, start_pos):
+        Exception.__init__(self, "%s: type=%r, value=%r, start_pos=%r" %
+                           (msg, type_.name, value, start_pos))
+        self.msg = msg
+        self.type = type
+        self.value = value
+        self.start_pos = start_pos
+
+
+class Stack(list):
+    def _allowed_transition_names_and_token_types(self):
+        def iterate():
+            # An API just for Jedi.
+ for stack_node in reversed(self): + for transition in stack_node.dfa.transitions: + if isinstance(transition, ReservedString): + yield transition.value + else: + yield transition # A token type + + if not stack_node.dfa.is_final: + break + + return list(iterate()) + + +class StackNode: + def __init__(self, dfa): + self.dfa = dfa + self.nodes = [] + + @property + def nonterminal(self): + return self.dfa.from_rule + + def __repr__(self): + return '%s(%s, %s)' % (self.__class__.__name__, self.dfa, self.nodes) + + +def _token_to_transition(grammar, type_, value): + # Map from token to label + if type_.value.contains_syntax: + # Check for reserved words (keywords) + try: + return grammar.reserved_syntax_strings[value] + except KeyError: + pass + + return type_ + + +class BaseParser: + """Parser engine. + + A Parser instance contains state pertaining to the current token + sequence, and should not be used concurrently by different threads + to parse separate token sequences. + + See python/tokenize.py for how to get input tokens by a string. + + When a syntax error occurs, error_recovery() is called. + """ + + node_map: Dict[str, Type[tree.BaseNode]] = {} + default_node = tree.Node + + leaf_map: Dict[str, Type[tree.Leaf]] = {} + default_leaf = tree.Leaf + + def __init__(self, pgen_grammar, start_nonterminal='file_input', error_recovery=False): + self._pgen_grammar = pgen_grammar + self._start_nonterminal = start_nonterminal + self._error_recovery = error_recovery + + def parse(self, tokens): + first_dfa = self._pgen_grammar.nonterminal_to_dfas[self._start_nonterminal][0] + self.stack = Stack([StackNode(first_dfa)]) + + for token in tokens: + self._add_token(token) + + while True: + tos = self.stack[-1] + if not tos.dfa.is_final: + # We never broke out -- EOF is too soon -- Unfinished statement. + # However, the error recovery might have added the token again, if + # the stack is empty, we're fine. 
+ raise InternalParseError( + "incomplete input", token.type, token.string, token.start_pos + ) + + if len(self.stack) > 1: + self._pop() + else: + return self.convert_node(tos.nonterminal, tos.nodes) + + def error_recovery(self, token): + if self._error_recovery: + raise NotImplementedError("Error Recovery is not implemented") + else: + type_, value, start_pos, prefix = token + error_leaf = tree.ErrorLeaf(type_, value, start_pos, prefix) + raise ParserSyntaxError('SyntaxError: invalid syntax', error_leaf) + + def convert_node(self, nonterminal, children): + try: + node = self.node_map[nonterminal](children) + except KeyError: + node = self.default_node(nonterminal, children) + return node + + def convert_leaf(self, type_, value, prefix, start_pos): + try: + return self.leaf_map[type_](value, start_pos, prefix) + except KeyError: + return self.default_leaf(value, start_pos, prefix) + + def _add_token(self, token): + """ + This is the only core function for parsing. Here happens basically + everything. Everything is well prepared by the parser generator and we + only apply the necessary steps here. + """ + grammar = self._pgen_grammar + stack = self.stack + type_, value, start_pos, prefix = token + transition = _token_to_transition(grammar, type_, value) + + while True: + try: + plan = stack[-1].dfa.transitions[transition] + break + except KeyError: + if stack[-1].dfa.is_final: + self._pop() + else: + self.error_recovery(token) + return + except IndexError: + raise InternalParseError("too much input", type_, value, start_pos) + + stack[-1].dfa = plan.next_dfa + + for push in plan.dfa_pushes: + stack.append(StackNode(push)) + + leaf = self.convert_leaf(type_, value, prefix, start_pos) + stack[-1].nodes.append(leaf) + + def _pop(self): + tos = self.stack.pop() + # If there's exactly one child, return that child instead of + # creating a new node. We still create expr_stmt and + # file_input though, because a lot of Jedi depends on its + # logic. 
+ if len(tos.nodes) == 1: + new_node = tos.nodes[0] + else: + new_node = self.convert_node(tos.dfa.from_rule, tos.nodes) + + self.stack[-1].nodes.append(new_node) diff --git a/bundle/jedi-vim/pythonx/parso/parso/pgen2/__init__.py b/bundle/jedi-vim/pythonx/parso/parso/pgen2/__init__.py new file mode 100644 index 000000000..d4d9dcdc4 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/parso/pgen2/__init__.py @@ -0,0 +1,10 @@ +# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +# Modifications: +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. +# Copyright 2014 David Halter and Contributors +# Modifications are dual-licensed: MIT and PSF. + +from parso.pgen2.generator import generate_grammar diff --git a/bundle/jedi-vim/pythonx/parso/parso/pgen2/generator.py b/bundle/jedi-vim/pythonx/parso/parso/pgen2/generator.py new file mode 100644 index 000000000..db6e1cb32 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/parso/pgen2/generator.py @@ -0,0 +1,382 @@ +# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +# Modifications: +# Copyright David Halter and Contributors +# Modifications are dual-licensed: MIT and PSF. + +""" +This module defines the data structures used to represent a grammar. + +Specifying grammars in pgen is possible with this grammar:: + + grammar: (NEWLINE | rule)* ENDMARKER + rule: NAME ':' rhs NEWLINE + rhs: items ('|' items)* + items: item+ + item: '[' rhs ']' | atom ['+' | '*'] + atom: '(' rhs ')' | NAME | STRING + +This grammar is self-referencing. + +This parser generator (pgen2) was created by Guido Rossum and used for lib2to3. +Most of the code has been refactored to make it more Pythonic. Since this was a +"copy" of the CPython Parser parser "pgen", there was some work needed to make +it more readable. 
It should also be slightly faster than the original pgen2, +because we made some optimizations. +""" + +from ast import literal_eval +from typing import TypeVar, Generic, Mapping, Sequence, Set, Union + +from parso.pgen2.grammar_parser import GrammarParser, NFAState + +_TokenTypeT = TypeVar("_TokenTypeT") + + +class Grammar(Generic[_TokenTypeT]): + """ + Once initialized, this class supplies the grammar tables for the + parsing engine implemented by parse.py. The parsing engine + accesses the instance variables directly. + + The only important part in this parsers are dfas and transitions between + dfas. + """ + + def __init__(self, + start_nonterminal: str, + rule_to_dfas: Mapping[str, Sequence['DFAState[_TokenTypeT]']], + reserved_syntax_strings: Mapping[str, 'ReservedString']): + self.nonterminal_to_dfas = rule_to_dfas + self.reserved_syntax_strings = reserved_syntax_strings + self.start_nonterminal = start_nonterminal + + +class DFAPlan: + """ + Plans are used for the parser to create stack nodes and do the proper + DFA state transitions. + """ + def __init__(self, next_dfa: 'DFAState', dfa_pushes: Sequence['DFAState'] = []): + self.next_dfa = next_dfa + self.dfa_pushes = dfa_pushes + + def __repr__(self): + return '%s(%s, %s)' % (self.__class__.__name__, self.next_dfa, self.dfa_pushes) + + +class DFAState(Generic[_TokenTypeT]): + """ + The DFAState object is the core class for pretty much anything. DFAState + are the vertices of an ordered graph while arcs and transitions are the + edges. + + Arcs are the initial edges, where most DFAStates are not connected and + transitions are then calculated to connect the DFA state machines that have + different nonterminals. 
+    """
+    def __init__(self, from_rule: str, nfa_set: Set[NFAState], final: NFAState):
+        assert isinstance(nfa_set, set)
+        assert isinstance(next(iter(nfa_set)), NFAState)
+        assert isinstance(final, NFAState)
+        self.from_rule = from_rule
+        self.nfa_set = nfa_set
+        # map from terminals/nonterminals to DFAState
+        self.arcs: Mapping[str, DFAState] = {}
+        # In an intermediary step we set these nonterminal arcs (which has the
+        # same structure as arcs). These don't contain terminals anymore.
+        self.nonterminal_arcs: Mapping[str, DFAState] = {}
+
+        # Transitions are basically the only thing that the parser is using
+        # with is_final. Everything else is purely here to create a parser.
+        self.transitions: Mapping[Union[_TokenTypeT, ReservedString], DFAPlan] = {}
+        self.is_final = final in nfa_set
+
+    def add_arc(self, next_, label):
+        assert isinstance(label, str)
+        assert label not in self.arcs
+        assert isinstance(next_, DFAState)
+        self.arcs[label] = next_
+
+    def unifystate(self, old, new):
+        for label, next_ in self.arcs.items():
+            if next_ is old:
+                self.arcs[label] = new
+
+    def __eq__(self, other):
+        # Equality test -- ignore the nfa_set instance variable
+        assert isinstance(other, DFAState)
+        if self.is_final != other.is_final:
+            return False
+        # Can't just return self.arcs == other.arcs, because that
+        # would invoke this method recursively, with cycles...
+        if len(self.arcs) != len(other.arcs):
+            return False
+        for label, next_ in self.arcs.items():
+            if next_ is not other.arcs.get(label):
+                return False
+        return True
+
+    def __repr__(self):
+        return '<%s: %s is_final=%s>' % (
+            self.__class__.__name__, self.from_rule, self.is_final
+        )
+
+
+class ReservedString:
+    """
+    Most grammars will have certain keywords and operators that are mentioned
+    in the grammar as strings (e.g. "if") and not token types (e.g. NUMBER).
+    This class basically is the former.
+ """ + + def __init__(self, value: str): + self.value = value + + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, self.value) + + +def _simplify_dfas(dfas): + """ + This is not theoretically optimal, but works well enough. + Algorithm: repeatedly look for two states that have the same + set of arcs (same labels pointing to the same nodes) and + unify them, until things stop changing. + + dfas is a list of DFAState instances + """ + changes = True + while changes: + changes = False + for i, state_i in enumerate(dfas): + for j in range(i + 1, len(dfas)): + state_j = dfas[j] + if state_i == state_j: + del dfas[j] + for state in dfas: + state.unifystate(state_j, state_i) + changes = True + break + + +def _make_dfas(start, finish): + """ + Uses the powerset construction algorithm to create DFA states from sets of + NFA states. + + Also does state reduction if some states are not needed. + """ + # To turn an NFA into a DFA, we define the states of the DFA + # to correspond to *sets* of states of the NFA. Then do some + # state reduction. + assert isinstance(start, NFAState) + assert isinstance(finish, NFAState) + + def addclosure(nfa_state, base_nfa_set): + assert isinstance(nfa_state, NFAState) + if nfa_state in base_nfa_set: + return + base_nfa_set.add(nfa_state) + for nfa_arc in nfa_state.arcs: + if nfa_arc.nonterminal_or_string is None: + addclosure(nfa_arc.next, base_nfa_set) + + base_nfa_set = set() + addclosure(start, base_nfa_set) + states = [DFAState(start.from_rule, base_nfa_set, finish)] + for state in states: # NB states grows while we're iterating + arcs = {} + # Find state transitions and store them in arcs. + for nfa_state in state.nfa_set: + for nfa_arc in nfa_state.arcs: + if nfa_arc.nonterminal_or_string is not None: + nfa_set = arcs.setdefault(nfa_arc.nonterminal_or_string, set()) + addclosure(nfa_arc.next, nfa_set) + + # Now create the dfa's with no None's in arcs anymore. 
All Nones have + been eliminated and state transitions (arcs) are properly defined, we + just need to create the dfa's. + for nonterminal_or_string, nfa_set in arcs.items(): + for nested_state in states: + if nested_state.nfa_set == nfa_set: + # The DFA state already exists for this rule. + break + else: + nested_state = DFAState(start.from_rule, nfa_set, finish) + states.append(nested_state) + + state.add_arc(nested_state, nonterminal_or_string) + return states # List of DFAState instances; first one is start + + +def _dump_nfa(start, finish): + print("Dump of NFA for", start.from_rule) + todo = [start] + for i, state in enumerate(todo): + print(" State", i, state is finish and "(final)" or "") + for arc in state.arcs: + label, next_ = arc.nonterminal_or_string, arc.next + if next_ in todo: + j = todo.index(next_) + else: + j = len(todo) + todo.append(next_) + if label is None: + print(" -> %d" % j) + else: + print(" %s -> %d" % (label, j)) + + +def _dump_dfas(dfas): + print("Dump of DFA for", dfas[0].from_rule) + for i, state in enumerate(dfas): + print(" State", i, state.is_final and "(final)" or "") + for nonterminal, next_ in state.arcs.items(): + print(" %s -> %d" % (nonterminal, dfas.index(next_))) + + +def generate_grammar(bnf_grammar: str, token_namespace) -> Grammar: + """ + ``bnf_grammar`` is a grammar in extended BNF (using * for repetition, + for + at-least-once repetition, [] for optional parts, | for alternatives and () + for grouping). + + It's not EBNF according to ISO/IEC 14977. It's a dialect Python uses in its + own parser. 
+ """ + rule_to_dfas = {} + start_nonterminal = None + for nfa_a, nfa_z in GrammarParser(bnf_grammar).parse(): + # _dump_nfa(nfa_a, nfa_z) + dfas = _make_dfas(nfa_a, nfa_z) + # _dump_dfas(dfas) + # oldlen = len(dfas) + _simplify_dfas(dfas) + # newlen = len(dfas) + rule_to_dfas[nfa_a.from_rule] = dfas + # print(nfa_a.from_rule, oldlen, newlen) + + if start_nonterminal is None: + start_nonterminal = nfa_a.from_rule + + reserved_strings: Mapping[str, ReservedString] = {} + for nonterminal, dfas in rule_to_dfas.items(): + for dfa_state in dfas: + for terminal_or_nonterminal, next_dfa in dfa_state.arcs.items(): + if terminal_or_nonterminal in rule_to_dfas: + dfa_state.nonterminal_arcs[terminal_or_nonterminal] = next_dfa + else: + transition = _make_transition( + token_namespace, + reserved_strings, + terminal_or_nonterminal + ) + dfa_state.transitions[transition] = DFAPlan(next_dfa) + + _calculate_tree_traversal(rule_to_dfas) + return Grammar(start_nonterminal, rule_to_dfas, reserved_strings) # type: ignore + + +def _make_transition(token_namespace, reserved_syntax_strings, label): + """ + Creates a reserved string ("if", "for", "*", ...) or returns the token type + (NUMBER, STRING, ...) for a given grammar terminal. + """ + if label[0].isalpha(): + # A named token (e.g. NAME, NUMBER, STRING) + return getattr(token_namespace, label) + else: + # Either a keyword or an operator + assert label[0] in ('"', "'"), label + assert not label.startswith('"""') and not label.startswith("'''") + value = literal_eval(label) + try: + return reserved_syntax_strings[value] + except KeyError: + r = reserved_syntax_strings[value] = ReservedString(value) + return r + + +def _calculate_tree_traversal(nonterminal_to_dfas): + """ + By this point we know how dfas can move around within a stack node, but we + don't know how we can add a new stack node (nonterminal transitions). + """ + # Map from grammar rule (nonterminal) name to a set of tokens. 
+ first_plans = {} + + nonterminals = list(nonterminal_to_dfas.keys()) + nonterminals.sort() + for nonterminal in nonterminals: + if nonterminal not in first_plans: + _calculate_first_plans(nonterminal_to_dfas, first_plans, nonterminal) + + # Now that we have calculated the first terminals, we are sure that + # there is no left recursion. + + for dfas in nonterminal_to_dfas.values(): + for dfa_state in dfas: + transitions = dfa_state.transitions + for nonterminal, next_dfa in dfa_state.nonterminal_arcs.items(): + for transition, pushes in first_plans[nonterminal].items(): + if transition in transitions: + prev_plan = transitions[transition] + # Make sure these are sorted so that error messages are + # at least deterministic + choices = sorted([ + ( + prev_plan.dfa_pushes[0].from_rule + if prev_plan.dfa_pushes + else prev_plan.next_dfa.from_rule + ), + ( + pushes[0].from_rule + if pushes else next_dfa.from_rule + ), + ]) + raise ValueError( + "Rule %s is ambiguous; given a %s token, we " + "can't determine if we should evaluate %s or %s." + % ( + ( + dfa_state.from_rule, + transition, + ) + tuple(choices) + ) + ) + transitions[transition] = DFAPlan(next_dfa, pushes) + + +def _calculate_first_plans(nonterminal_to_dfas, first_plans, nonterminal): + """ + Calculates the first plan in the first_plans dictionary for every given + nonterminal. This is going to be used to know when to create stack nodes. + """ + dfas = nonterminal_to_dfas[nonterminal] + new_first_plans = {} + first_plans[nonterminal] = None # dummy to detect left recursion + # We only need to check the first dfa. All the following ones are not + # interesting to find first terminals. + state = dfas[0] + for transition, next_ in state.transitions.items(): + # It's a string. We have finally found a possible first token. 
+ new_first_plans[transition] = [next_.next_dfa] + + for nonterminal2, next_ in state.nonterminal_arcs.items(): + # It's a nonterminal and we have either a left recursion issue + # in the grammar or we have to recurse. + try: + first_plans2 = first_plans[nonterminal2] + except KeyError: + first_plans2 = _calculate_first_plans(nonterminal_to_dfas, first_plans, nonterminal2) + else: + if first_plans2 is None: + raise ValueError("left recursion for rule %r" % nonterminal) + + for t, pushes in first_plans2.items(): + new_first_plans[t] = [next_] + pushes + + first_plans[nonterminal] = new_first_plans + return new_first_plans diff --git a/bundle/jedi-vim/pythonx/parso/parso/pgen2/grammar_parser.py b/bundle/jedi-vim/pythonx/parso/parso/pgen2/grammar_parser.py new file mode 100644 index 000000000..582efb43a --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/parso/pgen2/grammar_parser.py @@ -0,0 +1,160 @@ +# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +# Modifications: +# Copyright David Halter and Contributors +# Modifications are dual-licensed: MIT and PSF. 
+from typing import Optional, Iterator, Tuple, List + +from parso.python.tokenize import tokenize +from parso.utils import parse_version_string +from parso.python.token import PythonTokenTypes + + +class NFAArc: + def __init__(self, next_: 'NFAState', nonterminal_or_string: Optional[str]): + self.next: NFAState = next_ + self.nonterminal_or_string: Optional[str] = nonterminal_or_string + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.nonterminal_or_string) + + +class NFAState: + def __init__(self, from_rule: str): + self.from_rule: str = from_rule + self.arcs: List[NFAArc] = [] + + def add_arc(self, next_, nonterminal_or_string=None): + assert nonterminal_or_string is None or isinstance(nonterminal_or_string, str) + assert isinstance(next_, NFAState) + self.arcs.append(NFAArc(next_, nonterminal_or_string)) + + def __repr__(self): + return '<%s: from %s>' % (self.__class__.__name__, self.from_rule) + + +class GrammarParser: + """ + The parser for Python grammar files. + """ + def __init__(self, bnf_grammar: str): + self._bnf_grammar = bnf_grammar + self.generator = tokenize( + bnf_grammar, + version_info=parse_version_string('3.9') + ) + self._gettoken() # Initialize lookahead + + def parse(self) -> Iterator[Tuple[NFAState, NFAState]]: + # grammar: (NEWLINE | rule)* ENDMARKER + while self.type != PythonTokenTypes.ENDMARKER: + while self.type == PythonTokenTypes.NEWLINE: + self._gettoken() + + # rule: NAME ':' rhs NEWLINE + self._current_rule_name = self._expect(PythonTokenTypes.NAME) + self._expect(PythonTokenTypes.OP, ':') + + a, z = self._parse_rhs() + self._expect(PythonTokenTypes.NEWLINE) + + yield a, z + + def _parse_rhs(self): + # rhs: items ('|' items)* + a, z = self._parse_items() + if self.value != "|": + return a, z + else: + aa = NFAState(self._current_rule_name) + zz = NFAState(self._current_rule_name) + while True: + # Add the possibility to go into the state of a and come back + # to finish. 
+ aa.add_arc(a) + z.add_arc(zz) + if self.value != "|": + break + + self._gettoken() + a, z = self._parse_items() + return aa, zz + + def _parse_items(self): + # items: item+ + a, b = self._parse_item() + while self.type in (PythonTokenTypes.NAME, PythonTokenTypes.STRING) \ + or self.value in ('(', '['): + c, d = self._parse_item() + # Need to end on the next item. + b.add_arc(c) + b = d + return a, b + + def _parse_item(self): + # item: '[' rhs ']' | atom ['+' | '*'] + if self.value == "[": + self._gettoken() + a, z = self._parse_rhs() + self._expect(PythonTokenTypes.OP, ']') + # Make it also possible that there is no token and change the + # state. + a.add_arc(z) + return a, z + else: + a, z = self._parse_atom() + value = self.value + if value not in ("+", "*"): + return a, z + self._gettoken() + # Make it clear that we can go back to the old state and repeat. + z.add_arc(a) + if value == "+": + return a, z + else: + # The end state is the same as the beginning, nothing must + # change. + return a, a + + def _parse_atom(self): + # atom: '(' rhs ')' | NAME | STRING + if self.value == "(": + self._gettoken() + a, z = self._parse_rhs() + self._expect(PythonTokenTypes.OP, ')') + return a, z + elif self.type in (PythonTokenTypes.NAME, PythonTokenTypes.STRING): + a = NFAState(self._current_rule_name) + z = NFAState(self._current_rule_name) + # Make it clear that the state transition requires that value. + a.add_arc(z, self.value) + self._gettoken() + return a, z + else: + self._raise_error("expected (...) 
or NAME or STRING, got %s/%s", + self.type, self.value) + + def _expect(self, type_, value=None): + if self.type != type_: + self._raise_error("expected %s, got %s [%s]", + type_, self.type, self.value) + if value is not None and self.value != value: + self._raise_error("expected %s, got %s", value, self.value) + value = self.value + self._gettoken() + return value + + def _gettoken(self): + tup = next(self.generator) + self.type, self.value, self.begin, prefix = tup + + def _raise_error(self, msg, *args): + if args: + try: + msg = msg % args + except: + msg = " ".join([msg] + list(map(str, args))) + line = self._bnf_grammar.splitlines()[self.begin[0] - 1] + raise SyntaxError(msg, ('', self.begin[0], + self.begin[1], line)) diff --git a/bundle/jedi-vim/pythonx/parso/parso/py.typed b/bundle/jedi-vim/pythonx/parso/parso/py.typed new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/jedi-vim/pythonx/parso/parso/python/__init__.py b/bundle/jedi-vim/pythonx/parso/parso/python/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/jedi-vim/pythonx/parso/parso/python/diff.py b/bundle/jedi-vim/pythonx/parso/parso/python/diff.py new file mode 100644 index 000000000..ba999fa4b --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/parso/python/diff.py @@ -0,0 +1,886 @@ +""" +The diff parser is trying to be a faster version of the normal parser by trying +to reuse the nodes of a previous pass over the same file. This is also called +incremental parsing in parser literature. The difference is mostly that with +incremental parsing you get a range that needs to be reparsed. Here we +calculate that range ourselves by using difflib. After that it's essentially +incremental parsing. + +The biggest issue of this approach is that we reuse nodes in a mutable way. The +intial design and idea is quite problematic for this parser, but it is also +pretty fast. 
Measurements showed that just copying nodes in Python is simply +quite a bit slower (especially for big files >3 kLOC). Therefore we did not +want to get rid of the mutable nodes, since this is usually not an issue. + +This is by far the hardest software I ever wrote, exactly because the initial +design is crappy. When you have to account for a lot of mutable state, it +creates a ton of issues that you would otherwise not have. This file took +probably 3-6 months to write, which is insane for a parser. + +There is a fuzzer that helps test this whole thing. Please use it if you +make changes here. If you run the fuzzer like:: + + test/fuzz_diff_parser.py random -n 100000 + +you can be pretty sure that everything is still fine. I sometimes run the +fuzzer up to 24h to make sure everything is still ok. +""" +import re +import difflib +from collections import namedtuple +import logging + +from parso.utils import split_lines +from parso.python.parser import Parser +from parso.python.tree import EndMarker +from parso.python.tokenize import PythonToken, BOM_UTF8_STRING +from parso.python.token import PythonTokenTypes + +LOG = logging.getLogger(__name__) +DEBUG_DIFF_PARSER = False + +_INDENTATION_TOKENS = 'INDENT', 'ERROR_DEDENT', 'DEDENT' + +NEWLINE = PythonTokenTypes.NEWLINE +DEDENT = PythonTokenTypes.DEDENT +NAME = PythonTokenTypes.NAME +ERROR_DEDENT = PythonTokenTypes.ERROR_DEDENT +ENDMARKER = PythonTokenTypes.ENDMARKER + + +def _is_indentation_error_leaf(node): + return node.type == 'error_leaf' and node.token_type in _INDENTATION_TOKENS + + +def _get_previous_leaf_if_indentation(leaf): + while leaf and _is_indentation_error_leaf(leaf): + leaf = leaf.get_previous_leaf() + return leaf + + +def _get_next_leaf_if_indentation(leaf): + while leaf and _is_indentation_error_leaf(leaf): + leaf = leaf.get_next_leaf() + return leaf + + +def _get_suite_indentation(tree_node): + return _get_indentation(tree_node.children[1]) + + +def _get_indentation(tree_node): + return 
tree_node.start_pos[1] + + +def _assert_valid_graph(node): + """ + Checks if the parent/children relationship is correct. + + This is a check that only runs during debugging/testing. + """ + try: + children = node.children + except AttributeError: + # Ignore INDENT is necessary, because indent/dedent tokens don't + # contain value/prefix and are just around, because of the tokenizer. + if node.type == 'error_leaf' and node.token_type in _INDENTATION_TOKENS: + assert not node.value + assert not node.prefix + return + + # Calculate the content between two start positions. + previous_leaf = _get_previous_leaf_if_indentation(node.get_previous_leaf()) + if previous_leaf is None: + content = node.prefix + previous_start_pos = 1, 0 + else: + assert previous_leaf.end_pos <= node.start_pos, \ + (previous_leaf, node) + + content = previous_leaf.value + node.prefix + previous_start_pos = previous_leaf.start_pos + + if '\n' in content or '\r' in content: + splitted = split_lines(content) + line = previous_start_pos[0] + len(splitted) - 1 + actual = line, len(splitted[-1]) + else: + actual = previous_start_pos[0], previous_start_pos[1] + len(content) + if content.startswith(BOM_UTF8_STRING) \ + and node.get_start_pos_of_prefix() == (1, 0): + # Remove the byte order mark + actual = actual[0], actual[1] - 1 + + assert node.start_pos == actual, (node.start_pos, actual) + else: + for child in children: + assert child.parent == node, (node, child) + _assert_valid_graph(child) + + +def _assert_nodes_are_equal(node1, node2): + try: + children1 = node1.children + except AttributeError: + assert not hasattr(node2, 'children'), (node1, node2) + assert node1.value == node2.value, (node1, node2) + assert node1.type == node2.type, (node1, node2) + assert node1.prefix == node2.prefix, (node1, node2) + assert node1.start_pos == node2.start_pos, (node1, node2) + return + else: + try: + children2 = node2.children + except AttributeError: + assert False, (node1, node2) + for n1, n2 in 
zip(children1, children2): + _assert_nodes_are_equal(n1, n2) + assert len(children1) == len(children2), '\n' + repr(children1) + '\n' + repr(children2) + + +def _get_debug_error_message(module, old_lines, new_lines): + current_lines = split_lines(module.get_code(), keepends=True) + current_diff = difflib.unified_diff(new_lines, current_lines) + old_new_diff = difflib.unified_diff(old_lines, new_lines) + import parso + return ( + "There's an issue with the diff parser. Please " + "report (parso v%s) - Old/New:\n%s\nActual Diff (May be empty):\n%s" + % (parso.__version__, ''.join(old_new_diff), ''.join(current_diff)) + ) + + +def _get_last_line(node_or_leaf): + last_leaf = node_or_leaf.get_last_leaf() + if _ends_with_newline(last_leaf): + return last_leaf.start_pos[0] + else: + n = last_leaf.get_next_leaf() + if n.type == 'endmarker' and '\n' in n.prefix: + # This is a very special case and has to do with error recovery in + # Parso. The problem is basically that there's no newline leaf at + # the end sometimes (it's required in the grammar, but not needed + # actually before endmarker, CPython just adds a newline to make + # source code pass the parser, to account for that Parso error + # recovery allows small_stmt instead of simple_stmt). + return last_leaf.end_pos[0] + 1 + return last_leaf.end_pos[0] + + +def _skip_dedent_error_leaves(leaf): + while leaf is not None and leaf.type == 'error_leaf' and leaf.token_type == 'DEDENT': + leaf = leaf.get_previous_leaf() + return leaf + + +def _ends_with_newline(leaf, suffix=''): + leaf = _skip_dedent_error_leaves(leaf) + + if leaf.type == 'error_leaf': + typ = leaf.token_type.lower() + else: + typ = leaf.type + + return typ == 'newline' or suffix.endswith('\n') or suffix.endswith('\r') + + +def _flows_finished(pgen_grammar, stack): + """ + if, while, for and try might not be finished, because another part might + still be parsed. 
+ """ + for stack_node in stack: + if stack_node.nonterminal in ('if_stmt', 'while_stmt', 'for_stmt', 'try_stmt'): + return False + return True + + +def _func_or_class_has_suite(node): + if node.type == 'decorated': + node = node.children[-1] + if node.type in ('async_funcdef', 'async_stmt'): + node = node.children[-1] + return node.type in ('classdef', 'funcdef') and node.children[-1].type == 'suite' + + +def _suite_or_file_input_is_valid(pgen_grammar, stack): + if not _flows_finished(pgen_grammar, stack): + return False + + for stack_node in reversed(stack): + if stack_node.nonterminal == 'decorator': + # A decorator is only valid with the upcoming function. + return False + + if stack_node.nonterminal == 'suite': + # If only newline is in the suite, the suite is not valid, yet. + return len(stack_node.nodes) > 1 + # Not reaching a suite means that we're dealing with file_input levels + # where there's no need for a valid statement in it. It can also be empty. + return True + + +def _is_flow_node(node): + if node.type == 'async_stmt': + node = node.children[1] + try: + value = node.children[0].value + except AttributeError: + return False + return value in ('if', 'for', 'while', 'try', 'with') + + +class _PositionUpdatingFinished(Exception): + pass + + +def _update_positions(nodes, line_offset, last_leaf): + for node in nodes: + try: + children = node.children + except AttributeError: + # Is a leaf + node.line += line_offset + if node is last_leaf: + raise _PositionUpdatingFinished + else: + _update_positions(children, line_offset, last_leaf) + + +class DiffParser: + """ + An advanced form of parsing a file faster. Unfortunately comes with huge + side effects. It changes the given module. 
+ """ + def __init__(self, pgen_grammar, tokenizer, module): + self._pgen_grammar = pgen_grammar + self._tokenizer = tokenizer + self._module = module + + def _reset(self): + self._copy_count = 0 + self._parser_count = 0 + + self._nodes_tree = _NodesTree(self._module) + + def update(self, old_lines, new_lines): + ''' + The algorithm works as follows: + + Equal: + - Assure that the start is a newline, otherwise parse until we get + one. + - Copy from parsed_until_line + 1 to max(i2 + 1) + - Make sure that the indentation is correct (e.g. add DEDENT) + - Add old and change positions + Insert: + - Parse from parsed_until_line + 1 to min(j2 + 1), hopefully not + much more. + + Returns the new module node. + ''' + LOG.debug('diff parser start') + # Reset the used names cache so they get regenerated. + self._module._used_names = None + + self._parser_lines_new = new_lines + + self._reset() + + line_length = len(new_lines) + sm = difflib.SequenceMatcher(None, old_lines, self._parser_lines_new) + opcodes = sm.get_opcodes() + LOG.debug('line_lengths old: %s; new: %s' % (len(old_lines), line_length)) + + for operation, i1, i2, j1, j2 in opcodes: + LOG.debug('-> code[%s] old[%s:%s] new[%s:%s]', + operation, i1 + 1, i2, j1 + 1, j2) + + if j2 == line_length and new_lines[-1] == '': + # The empty part after the last newline is not relevant. + j2 -= 1 + + if operation == 'equal': + line_offset = j1 - i1 + self._copy_from_old_parser(line_offset, i1 + 1, i2, j2) + elif operation == 'replace': + self._parse(until_line=j2) + elif operation == 'insert': + self._parse(until_line=j2) + else: + assert operation == 'delete' + + # With this action all change will finally be applied and we have a + # changed module. + self._nodes_tree.close() + + if DEBUG_DIFF_PARSER: + # If there is reasonable suspicion that the diff parser is not + # behaving well, this should be enabled. 
+ try: + code = ''.join(new_lines) + assert self._module.get_code() == code + _assert_valid_graph(self._module) + without_diff_parser_module = Parser( + self._pgen_grammar, + error_recovery=True + ).parse(self._tokenizer(new_lines)) + _assert_nodes_are_equal(self._module, without_diff_parser_module) + except AssertionError: + print(_get_debug_error_message(self._module, old_lines, new_lines)) + raise + + last_pos = self._module.end_pos[0] + if last_pos != line_length: + raise Exception( + ('(%s != %s) ' % (last_pos, line_length)) + + _get_debug_error_message(self._module, old_lines, new_lines) + ) + LOG.debug('diff parser end') + return self._module + + def _enabled_debugging(self, old_lines, lines_new): + if self._module.get_code() != ''.join(lines_new): + LOG.warning('parser issue:\n%s\n%s', ''.join(old_lines), ''.join(lines_new)) + + def _copy_from_old_parser(self, line_offset, start_line_old, until_line_old, until_line_new): + last_until_line = -1 + while until_line_new > self._nodes_tree.parsed_until_line: + parsed_until_line_old = self._nodes_tree.parsed_until_line - line_offset + line_stmt = self._get_old_line_stmt(parsed_until_line_old + 1) + if line_stmt is None: + # Parse 1 line at least. We don't need more, because we just + # want to get into a state where the old parser has statements + # again that can be copied (e.g. not lines within parentheses). + self._parse(self._nodes_tree.parsed_until_line + 1) + else: + p_children = line_stmt.parent.children + index = p_children.index(line_stmt) + + if start_line_old == 1 \ + and p_children[0].get_first_leaf().prefix.startswith(BOM_UTF8_STRING): + # If there's a BOM in the beginning, just reparse. It's too + # complicated to account for it otherwise. + copied_nodes = [] + else: + from_ = self._nodes_tree.parsed_until_line + 1 + copied_nodes = self._nodes_tree.copy_nodes( + p_children[index:], + until_line_old, + line_offset + ) + # Match all the nodes that are in the wanted range. 
+ if copied_nodes: + self._copy_count += 1 + + to = self._nodes_tree.parsed_until_line + + LOG.debug('copy old[%s:%s] new[%s:%s]', + copied_nodes[0].start_pos[0], + copied_nodes[-1].end_pos[0] - 1, from_, to) + else: + # We have copied as much as possible (but definitely not too + # much). Therefore we just parse a bit more. + self._parse(self._nodes_tree.parsed_until_line + 1) + # Since there are potential bugs that might loop here endlessly, we + # just stop here. + assert last_until_line != self._nodes_tree.parsed_until_line, last_until_line + last_until_line = self._nodes_tree.parsed_until_line + + def _get_old_line_stmt(self, old_line): + leaf = self._module.get_leaf_for_position((old_line, 0), include_prefixes=True) + + if _ends_with_newline(leaf): + leaf = leaf.get_next_leaf() + if leaf.get_start_pos_of_prefix()[0] == old_line: + node = leaf + while node.parent.type not in ('file_input', 'suite'): + node = node.parent + + # Make sure that if only the `else:` line of an if statement is + # copied that not the whole thing is going to be copied. + if node.start_pos[0] >= old_line: + return node + # Must be on the same line. Otherwise we need to parse that bit. + return None + + def _parse(self, until_line): + """ + Parses at least until the given line, but might just parse more until a + valid state is reached. + """ + last_until_line = 0 + while until_line > self._nodes_tree.parsed_until_line: + node = self._try_parse_part(until_line) + nodes = node.children + + self._nodes_tree.add_parsed_nodes(nodes, self._keyword_token_indents) + if self._replace_tos_indent is not None: + self._nodes_tree.indents[-1] = self._replace_tos_indent + + LOG.debug( + 'parse_part from %s to %s (to %s in part parser)', + nodes[0].get_start_pos_of_prefix()[0], + self._nodes_tree.parsed_until_line, + node.end_pos[0] - 1 + ) + # Since the tokenizer sometimes has bugs, we cannot be sure that + # this loop terminates. Therefore assert that there's always a + # change. 
+ assert last_until_line != self._nodes_tree.parsed_until_line, last_until_line + last_until_line = self._nodes_tree.parsed_until_line + + def _try_parse_part(self, until_line): + """ + Sets up a normal parser that uses a specialized tokenizer to only parse + until a certain position (or a bit longer if the statement hasn't + ended). + """ + self._parser_count += 1 + # TODO speed up, shouldn't copy the whole list all the time. + # memoryview? + parsed_until_line = self._nodes_tree.parsed_until_line + lines_after = self._parser_lines_new[parsed_until_line:] + tokens = self._diff_tokenize( + lines_after, + until_line, + line_offset=parsed_until_line + ) + self._active_parser = Parser( + self._pgen_grammar, + error_recovery=True + ) + return self._active_parser.parse(tokens=tokens) + + def _diff_tokenize(self, lines, until_line, line_offset=0): + was_newline = False + indents = self._nodes_tree.indents + initial_indentation_count = len(indents) + + tokens = self._tokenizer( + lines, + start_pos=(line_offset + 1, 0), + indents=indents, + is_first_token=line_offset == 0, + ) + stack = self._active_parser.stack + self._replace_tos_indent = None + self._keyword_token_indents = {} + # print('start', line_offset + 1, indents) + for token in tokens: + # print(token, indents) + typ = token.type + if typ == DEDENT: + if len(indents) < initial_indentation_count: + # We are done here, only thing that can come now is an + # endmarker or another dedented code block. + while True: + typ, string, start_pos, prefix = token = next(tokens) + if typ in (DEDENT, ERROR_DEDENT): + if typ == ERROR_DEDENT: + # We want to force an error dedent in the next + # parser/pass. To make this possible we just + # increase the location by one. 
+ self._replace_tos_indent = start_pos[1] + 1 + pass + else: + break + + if '\n' in prefix or '\r' in prefix: + prefix = re.sub(r'[^\n\r]+\Z', '', prefix) + else: + assert start_pos[1] >= len(prefix), repr(prefix) + if start_pos[1] - len(prefix) == 0: + prefix = '' + yield PythonToken( + ENDMARKER, '', + start_pos, + prefix + ) + break + elif typ == NEWLINE and token.start_pos[0] >= until_line: + was_newline = True + elif was_newline: + was_newline = False + if len(indents) == initial_indentation_count: + # Check if the parser is actually in a valid suite state. + if _suite_or_file_input_is_valid(self._pgen_grammar, stack): + yield PythonToken(ENDMARKER, '', token.start_pos, '') + break + + if typ == NAME and token.string in ('class', 'def'): + self._keyword_token_indents[token.start_pos] = list(indents) + + yield token + + +class _NodesTreeNode: + _ChildrenGroup = namedtuple( + '_ChildrenGroup', + 'prefix children line_offset last_line_offset_leaf') + + def __init__(self, tree_node, parent=None, indentation=0): + self.tree_node = tree_node + self._children_groups = [] + self.parent = parent + self._node_children = [] + self.indentation = indentation + + def finish(self): + children = [] + for prefix, children_part, line_offset, last_line_offset_leaf in self._children_groups: + first_leaf = _get_next_leaf_if_indentation( + children_part[0].get_first_leaf() + ) + + first_leaf.prefix = prefix + first_leaf.prefix + if line_offset != 0: + try: + _update_positions( + children_part, line_offset, last_line_offset_leaf) + except _PositionUpdatingFinished: + pass + children += children_part + self.tree_node.children = children + # Reset the parents + for node in children: + node.parent = self.tree_node + + for node_child in self._node_children: + node_child.finish() + + def add_child_node(self, child_node): + self._node_children.append(child_node) + + def add_tree_nodes(self, prefix, children, line_offset=0, + last_line_offset_leaf=None): + if last_line_offset_leaf is None: 
+ last_line_offset_leaf = children[-1].get_last_leaf() + group = self._ChildrenGroup( + prefix, children, line_offset, last_line_offset_leaf + ) + self._children_groups.append(group) + + def get_last_line(self, suffix): + line = 0 + if self._children_groups: + children_group = self._children_groups[-1] + last_leaf = _get_previous_leaf_if_indentation( + children_group.last_line_offset_leaf + ) + + line = last_leaf.end_pos[0] + children_group.line_offset + + # Newlines end on the next line, which means that they would cover + # the next line. That line is not fully parsed at this point. + if _ends_with_newline(last_leaf, suffix): + line -= 1 + line += len(split_lines(suffix)) - 1 + + if suffix and not suffix.endswith('\n') and not suffix.endswith('\r'): + # This is the end of a file (that doesn't end with a newline). + line += 1 + + if self._node_children: + return max(line, self._node_children[-1].get_last_line(suffix)) + return line + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.tree_node) + + +class _NodesTree: + def __init__(self, module): + self._base_node = _NodesTreeNode(module) + self._working_stack = [self._base_node] + self._module = module + self._prefix_remainder = '' + self.prefix = '' + self.indents = [0] + + @property + def parsed_until_line(self): + return self._working_stack[-1].get_last_line(self.prefix) + + def _update_insertion_node(self, indentation): + for node in reversed(list(self._working_stack)): + if node.indentation < indentation or node is self._working_stack[0]: + return node + self._working_stack.pop() + + def add_parsed_nodes(self, tree_nodes, keyword_token_indents): + old_prefix = self.prefix + tree_nodes = self._remove_endmarker(tree_nodes) + if not tree_nodes: + self.prefix = old_prefix + self.prefix + return + + assert tree_nodes[0].type != 'newline' + + node = self._update_insertion_node(tree_nodes[0].start_pos[1]) + assert node.tree_node.type in ('suite', 'file_input') + 
node.add_tree_nodes(old_prefix, tree_nodes) + # tos = Top of stack + self._update_parsed_node_tos(tree_nodes[-1], keyword_token_indents) + + def _update_parsed_node_tos(self, tree_node, keyword_token_indents): + if tree_node.type == 'suite': + def_leaf = tree_node.parent.children[0] + new_tos = _NodesTreeNode( + tree_node, + indentation=keyword_token_indents[def_leaf.start_pos][-1], + ) + new_tos.add_tree_nodes('', list(tree_node.children)) + + self._working_stack[-1].add_child_node(new_tos) + self._working_stack.append(new_tos) + + self._update_parsed_node_tos(tree_node.children[-1], keyword_token_indents) + elif _func_or_class_has_suite(tree_node): + self._update_parsed_node_tos(tree_node.children[-1], keyword_token_indents) + + def _remove_endmarker(self, tree_nodes): + """ + Helps cleaning up the tree nodes that get inserted. + """ + last_leaf = tree_nodes[-1].get_last_leaf() + is_endmarker = last_leaf.type == 'endmarker' + self._prefix_remainder = '' + if is_endmarker: + prefix = last_leaf.prefix + separation = max(prefix.rfind('\n'), prefix.rfind('\r')) + if separation > -1: + # Remove the whitespace part of the prefix after a newline. + # That is not relevant if parentheses were opened. Always parse + # until the end of a line. + last_leaf.prefix, self._prefix_remainder = \ + last_leaf.prefix[:separation + 1], last_leaf.prefix[separation + 1:] + + self.prefix = '' + + if is_endmarker: + self.prefix = last_leaf.prefix + + tree_nodes = tree_nodes[:-1] + return tree_nodes + + def _get_matching_indent_nodes(self, tree_nodes, is_new_suite): + # There might be a random dedent where we have to stop copying. + # Invalid indents are ok, because the parser handled that + # properly before. An invalid dedent can happen, because a few + # lines above there was an invalid indent. 
+ node_iterator = iter(tree_nodes) + if is_new_suite: + yield next(node_iterator) + + first_node = next(node_iterator) + indent = _get_indentation(first_node) + if not is_new_suite and indent not in self.indents: + return + yield first_node + + for n in node_iterator: + if _get_indentation(n) != indent: + return + yield n + + def copy_nodes(self, tree_nodes, until_line, line_offset): + """ + Copies tree nodes from the old parser tree. + + Returns the number of tree nodes that were copied. + """ + if tree_nodes[0].type in ('error_leaf', 'error_node'): + # Avoid copying errors in the beginning. Can lead to a lot of + # issues. + return [] + + indentation = _get_indentation(tree_nodes[0]) + old_working_stack = list(self._working_stack) + old_prefix = self.prefix + old_indents = self.indents + self.indents = [i for i in self.indents if i <= indentation] + + self._update_insertion_node(indentation) + + new_nodes, self._working_stack, self.prefix, added_indents = self._copy_nodes( + list(self._working_stack), + tree_nodes, + until_line, + line_offset, + self.prefix, + ) + if new_nodes: + self.indents += added_indents + else: + self._working_stack = old_working_stack + self.prefix = old_prefix + self.indents = old_indents + return new_nodes + + def _copy_nodes(self, working_stack, nodes, until_line, line_offset, + prefix='', is_nested=False): + new_nodes = [] + added_indents = [] + + nodes = list(self._get_matching_indent_nodes( + nodes, + is_new_suite=is_nested, + )) + + new_prefix = '' + for node in nodes: + if node.start_pos[0] > until_line: + break + + if node.type == 'endmarker': + break + + if node.type == 'error_leaf' and node.token_type in ('DEDENT', 'ERROR_DEDENT'): + break + # TODO this check might take a bit of time for large files. We + # might want to change this to do more intelligent guessing or + # binary search. + if _get_last_line(node) > until_line: + # We can split up functions and classes later. 
+ if _func_or_class_has_suite(node): + new_nodes.append(node) + break + try: + c = node.children + except AttributeError: + pass + else: + # This case basically appears with error recovery of one line + # suites like `def foo(): bar.-`. In this case we might not + # include a newline in the statement and we need to take care + # of that. + n = node + if n.type == 'decorated': + n = n.children[-1] + if n.type in ('async_funcdef', 'async_stmt'): + n = n.children[-1] + if n.type in ('classdef', 'funcdef'): + suite_node = n.children[-1] + else: + suite_node = c[-1] + + if suite_node.type in ('error_leaf', 'error_node'): + break + + new_nodes.append(node) + + # Pop error nodes at the end from the list + if new_nodes: + while new_nodes: + last_node = new_nodes[-1] + if (last_node.type in ('error_leaf', 'error_node') + or _is_flow_node(new_nodes[-1])): + # Error leafs/nodes don't have a defined start/end. Error + # nodes might not end with a newline (e.g. if there's an + # open `(`). Therefore ignore all of them unless they are + # succeeded with valid parser state. + # If we copy flows at the end, they might be continued + # after the copy limit (in the new parser). + # In this while loop we try to remove until we find a newline. + new_prefix = '' + new_nodes.pop() + while new_nodes: + last_node = new_nodes[-1] + if last_node.get_last_leaf().type == 'newline': + break + new_nodes.pop() + continue + if len(new_nodes) > 1 and new_nodes[-2].type == 'error_node': + # The problem here is that Parso error recovery sometimes + # influences nodes before this node. + # Since the new last node is an error node this will get + # cleaned up in the next while iteration. 
+ new_nodes.pop() + continue + break + + if not new_nodes: + return [], working_stack, prefix, added_indents + + tos = working_stack[-1] + last_node = new_nodes[-1] + had_valid_suite_last = False + # Pop incomplete suites from the list + if _func_or_class_has_suite(last_node): + suite = last_node + while suite.type != 'suite': + suite = suite.children[-1] + + indent = _get_suite_indentation(suite) + added_indents.append(indent) + + suite_tos = _NodesTreeNode(suite, indentation=_get_indentation(last_node)) + # Don't need to pass line_offset here, it's already done by the + # parent. + suite_nodes, new_working_stack, new_prefix, ai = self._copy_nodes( + working_stack + [suite_tos], suite.children, until_line, line_offset, + is_nested=True, + ) + added_indents += ai + if len(suite_nodes) < 2: + # A suite only with newline is not valid. + new_nodes.pop() + new_prefix = '' + else: + assert new_nodes + tos.add_child_node(suite_tos) + working_stack = new_working_stack + had_valid_suite_last = True + + if new_nodes: + if not _ends_with_newline(new_nodes[-1].get_last_leaf()) and not had_valid_suite_last: + p = new_nodes[-1].get_next_leaf().prefix + # We are not allowed to remove the newline at the end of the + # line, otherwise it's going to be missing. This happens e.g. + # if a bracket is around before that moves newlines to + # prefixes. 
+ new_prefix = split_lines(p, keepends=True)[0] + + if had_valid_suite_last: + last = new_nodes[-1] + if last.type == 'decorated': + last = last.children[-1] + if last.type in ('async_funcdef', 'async_stmt'): + last = last.children[-1] + last_line_offset_leaf = last.children[-2].get_last_leaf() + assert last_line_offset_leaf == ':' + else: + last_line_offset_leaf = new_nodes[-1].get_last_leaf() + tos.add_tree_nodes( + prefix, new_nodes, line_offset, last_line_offset_leaf, + ) + prefix = new_prefix + self._prefix_remainder = '' + + return new_nodes, working_stack, prefix, added_indents + + def close(self): + self._base_node.finish() + + # Add an endmarker. + try: + last_leaf = self._module.get_last_leaf() + except IndexError: + end_pos = [1, 0] + else: + last_leaf = _skip_dedent_error_leaves(last_leaf) + end_pos = list(last_leaf.end_pos) + lines = split_lines(self.prefix) + assert len(lines) > 0 + if len(lines) == 1: + if lines[0].startswith(BOM_UTF8_STRING) and end_pos == [1, 0]: + end_pos[1] -= 1 + end_pos[1] += len(lines[0]) + else: + end_pos[0] += len(lines) - 1 + end_pos[1] = len(lines[-1]) + + endmarker = EndMarker('', tuple(end_pos), self.prefix + self._prefix_remainder) + endmarker.parent = self._module + self._module.children.append(endmarker) diff --git a/bundle/jedi-vim/pythonx/parso/parso/python/errors.py b/bundle/jedi-vim/pythonx/parso/parso/python/errors.py new file mode 100644 index 000000000..5da046ab0 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/parso/python/errors.py @@ -0,0 +1,1295 @@ +# -*- coding: utf-8 -*- +import codecs +import warnings +import re +from contextlib import contextmanager + +from parso.normalizer import Normalizer, NormalizerConfig, Issue, Rule +from parso.python.tokenize import _get_token_collection + +_BLOCK_STMTS = ('if_stmt', 'while_stmt', 'for_stmt', 'try_stmt', 'with_stmt') +_STAR_EXPR_PARENTS = ('testlist_star_expr', 'testlist_comp', 'exprlist') +# This is the maximal block size given by python. 
+_MAX_BLOCK_SIZE = 20 +_MAX_INDENT_COUNT = 100 +ALLOWED_FUTURES = ( + 'nested_scopes', 'generators', 'division', 'absolute_import', + 'with_statement', 'print_function', 'unicode_literals', 'generator_stop', +) +_COMP_FOR_TYPES = ('comp_for', 'sync_comp_for') + + +def _get_rhs_name(node, version): + type_ = node.type + if type_ == "lambdef": + return "lambda" + elif type_ == "atom": + comprehension = _get_comprehension_type(node) + first, second = node.children[:2] + if comprehension is not None: + return comprehension + elif second.type == "dictorsetmaker": + if version < (3, 8): + return "literal" + else: + if second.children[1] == ":" or second.children[0] == "**": + return "dict display" + else: + return "set display" + elif ( + first == "(" + and (second == ")" + or (len(node.children) == 3 and node.children[1].type == "testlist_comp")) + ): + return "tuple" + elif first == "(": + return _get_rhs_name(_remove_parens(node), version=version) + elif first == "[": + return "list" + elif first == "{" and second == "}": + return "dict display" + elif first == "{" and len(node.children) > 2: + return "set display" + elif type_ == "keyword": + if "yield" in node.value: + return "yield expression" + if version < (3, 8): + return "keyword" + else: + return str(node.value) + elif type_ == "operator" and node.value == "...": + return "Ellipsis" + elif type_ == "comparison": + return "comparison" + elif type_ in ("string", "number", "strings"): + return "literal" + elif type_ == "yield_expr": + return "yield expression" + elif type_ == "test": + return "conditional expression" + elif type_ in ("atom_expr", "power"): + if node.children[0] == "await": + return "await expression" + elif node.children[-1].type == "trailer": + trailer = node.children[-1] + if trailer.children[0] == "(": + return "function call" + elif trailer.children[0] == "[": + return "subscript" + elif trailer.children[0] == ".": + return "attribute" + elif ( + ("expr" in type_ and "star_expr" not in type_) 
# is a substring + or "_test" in type_ + or type_ in ("term", "factor") + ): + return "operator" + elif type_ == "star_expr": + return "starred" + elif type_ == "testlist_star_expr": + return "tuple" + elif type_ == "fstring": + return "f-string expression" + return type_ # shouldn't reach here + + +def _iter_stmts(scope): + """ + Iterates over all statements and splits up simple_stmt. + """ + for child in scope.children: + if child.type == 'simple_stmt': + for child2 in child.children: + if child2.type == 'newline' or child2 == ';': + continue + yield child2 + else: + yield child + + +def _get_comprehension_type(atom): + first, second = atom.children[:2] + if second.type == 'testlist_comp' and second.children[1].type in _COMP_FOR_TYPES: + if first == '[': + return 'list comprehension' + else: + return 'generator expression' + elif second.type == 'dictorsetmaker' and second.children[-1].type in _COMP_FOR_TYPES: + if second.children[1] == ':': + return 'dict comprehension' + else: + return 'set comprehension' + return None + + +def _is_future_import(import_from): + # It looks like a __future__ import that is relative is still a future + # import. That feels kind of odd, but whatever. + # if import_from.level != 0: + # return False + from_names = import_from.get_from_names() + return [n.value for n in from_names] == ['__future__'] + + +def _remove_parens(atom): + """ + Returns the inner part of an expression like `(foo)`. Also removes nested + parens. + """ + try: + children = atom.children + except AttributeError: + pass + else: + if len(children) == 3 and children[0] == '(': + return _remove_parens(atom.children[1]) + return atom + + +def _skip_parens_bottom_up(node): + """ + Returns an ancestor node of an expression, skipping all levels of parens + bottom-up. 
+ """ + while node.parent is not None: + node = node.parent + if node.type != 'atom' or node.children[0] != '(': + return node + return None + + +def _iter_params(parent_node): + return (n for n in parent_node.children if n.type == 'param' or n.type == 'operator') + + +def _is_future_import_first(import_from): + """ + Checks if the import is the first statement of a file. + """ + found_docstring = False + for stmt in _iter_stmts(import_from.get_root_node()): + if stmt.type == 'string' and not found_docstring: + continue + found_docstring = True + + if stmt == import_from: + return True + if stmt.type == 'import_from' and _is_future_import(stmt): + continue + return False + + +def _iter_definition_exprs_from_lists(exprlist): + def check_expr(child): + if child.type == 'atom': + if child.children[0] == '(': + testlist_comp = child.children[1] + if testlist_comp.type == 'testlist_comp': + yield from _iter_definition_exprs_from_lists(testlist_comp) + return + else: + # It's a paren that doesn't do anything, like 1 + (1) + yield from check_expr(testlist_comp) + return + elif child.children[0] == '[': + yield testlist_comp + return + yield child + + if exprlist.type in _STAR_EXPR_PARENTS: + for child in exprlist.children[::2]: + yield from check_expr(child) + else: + yield from check_expr(exprlist) + + +def _get_expr_stmt_definition_exprs(expr_stmt): + exprs = [] + for list_ in expr_stmt.children[:-2:2]: + if list_.type in ('testlist_star_expr', 'testlist'): + exprs += _iter_definition_exprs_from_lists(list_) + else: + exprs.append(list_) + return exprs + + +def _get_for_stmt_definition_exprs(for_stmt): + exprlist = for_stmt.children[1] + return list(_iter_definition_exprs_from_lists(exprlist)) + + +def _is_argument_comprehension(argument): + return argument.children[1].type in _COMP_FOR_TYPES + + +def _any_fstring_error(version, node): + if version < (3, 9) or node is None: + return False + if node.type == "error_node": + return any(child.type == "fstring_start" for 
child in node.children) + elif node.type == "fstring": + return True + else: + return node.search_ancestor("fstring") + + +class _Context: + def __init__(self, node, add_syntax_error, parent_context=None): + self.node = node + self.blocks = [] + self.parent_context = parent_context + self._used_name_dict = {} + self._global_names = [] + self._local_params_names = [] + self._nonlocal_names = [] + self._nonlocal_names_in_subscopes = [] + self._add_syntax_error = add_syntax_error + + def is_async_funcdef(self): + # Stupidly enough async funcdefs can have two different forms, + # depending if a decorator is used or not. + return self.is_function() \ + and self.node.parent.type in ('async_funcdef', 'async_stmt') + + def is_function(self): + return self.node.type == 'funcdef' + + def add_name(self, name): + parent_type = name.parent.type + if parent_type == 'trailer': + # We are only interested in first level names. + return + + if parent_type == 'global_stmt': + self._global_names.append(name) + elif parent_type == 'nonlocal_stmt': + self._nonlocal_names.append(name) + elif parent_type == 'funcdef': + self._local_params_names.extend( + [param.name.value for param in name.parent.get_params()] + ) + else: + self._used_name_dict.setdefault(name.value, []).append(name) + + def finalize(self): + """ + Returns a list of nonlocal names that need to be part of that scope. 
+ """ + self._analyze_names(self._global_names, 'global') + self._analyze_names(self._nonlocal_names, 'nonlocal') + + global_name_strs = {n.value: n for n in self._global_names} + for nonlocal_name in self._nonlocal_names: + try: + global_name = global_name_strs[nonlocal_name.value] + except KeyError: + continue + + message = "name '%s' is nonlocal and global" % global_name.value + if global_name.start_pos < nonlocal_name.start_pos: + error_name = global_name + else: + error_name = nonlocal_name + self._add_syntax_error(error_name, message) + + nonlocals_not_handled = [] + for nonlocal_name in self._nonlocal_names_in_subscopes: + search = nonlocal_name.value + if search in self._local_params_names: + continue + if search in global_name_strs or self.parent_context is None: + message = "no binding for nonlocal '%s' found" % nonlocal_name.value + self._add_syntax_error(nonlocal_name, message) + elif not self.is_function() or \ + nonlocal_name.value not in self._used_name_dict: + nonlocals_not_handled.append(nonlocal_name) + return self._nonlocal_names + nonlocals_not_handled + + def _analyze_names(self, globals_or_nonlocals, type_): + def raise_(message): + self._add_syntax_error(base_name, message % (base_name.value, type_)) + + params = [] + if self.node.type == 'funcdef': + params = self.node.get_params() + + for base_name in globals_or_nonlocals: + found_global_or_nonlocal = False + # Somehow Python does it the reversed way. + for name in reversed(self._used_name_dict.get(base_name.value, [])): + if name.start_pos > base_name.start_pos: + # All following names don't have to be checked. + found_global_or_nonlocal = True + + parent = name.parent + if parent.type == 'param' and parent.name == name: + # Skip those here, these definitions belong to the next + # scope. 
+ continue + + if name.is_definition(): + if parent.type == 'expr_stmt' \ + and parent.children[1].type == 'annassign': + if found_global_or_nonlocal: + # If it's after the global the error seems to be + # placed there. + base_name = name + raise_("annotated name '%s' can't be %s") + break + else: + message = "name '%s' is assigned to before %s declaration" + else: + message = "name '%s' is used prior to %s declaration" + + if not found_global_or_nonlocal: + raise_(message) + # Only add an error for the first occurence. + break + + for param in params: + if param.name.value == base_name.value: + raise_("name '%s' is parameter and %s"), + + @contextmanager + def add_block(self, node): + self.blocks.append(node) + yield + self.blocks.pop() + + def add_context(self, node): + return _Context(node, self._add_syntax_error, parent_context=self) + + def close_child_context(self, child_context): + self._nonlocal_names_in_subscopes += child_context.finalize() + + +class ErrorFinder(Normalizer): + """ + Searches for errors in the syntax tree. + """ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._error_dict = {} + self.version = self.grammar.version_info + + def initialize(self, node): + def create_context(node): + if node is None: + return None + + parent_context = create_context(node.parent) + if node.type in ('classdef', 'funcdef', 'file_input'): + return _Context(node, self._add_syntax_error, parent_context) + return parent_context + + self.context = create_context(node) or _Context(node, self._add_syntax_error) + self._indentation_count = 0 + + def visit(self, node): + if node.type == 'error_node': + with self.visit_node(node): + # Don't need to investigate the inners of an error node. We + # might find errors in there that should be ignored, because + # the error node itself already shows that there's an issue. 
+ return '' + return super().visit(node) + + @contextmanager + def visit_node(self, node): + self._check_type_rules(node) + + if node.type in _BLOCK_STMTS: + with self.context.add_block(node): + if len(self.context.blocks) == _MAX_BLOCK_SIZE: + self._add_syntax_error(node, "too many statically nested blocks") + yield + return + elif node.type == 'suite': + self._indentation_count += 1 + if self._indentation_count == _MAX_INDENT_COUNT: + self._add_indentation_error(node.children[1], "too many levels of indentation") + + yield + + if node.type == 'suite': + self._indentation_count -= 1 + elif node.type in ('classdef', 'funcdef'): + context = self.context + self.context = context.parent_context + self.context.close_child_context(context) + + def visit_leaf(self, leaf): + if leaf.type == 'error_leaf': + if leaf.token_type in ('INDENT', 'ERROR_DEDENT'): + # Indents/Dedents itself never have a prefix. They are just + # "pseudo" tokens that get removed by the syntax tree later. + # Therefore in case of an error we also have to check for this. 
+ spacing = list(leaf.get_next_leaf()._split_prefix())[-1] + if leaf.token_type == 'INDENT': + message = 'unexpected indent' + else: + message = 'unindent does not match any outer indentation level' + self._add_indentation_error(spacing, message) + else: + if leaf.value.startswith('\\'): + message = 'unexpected character after line continuation character' + else: + match = re.match('\\w{,2}("{1,3}|\'{1,3})', leaf.value) + if match is None: + message = 'invalid syntax' + if ( + self.version >= (3, 9) + and leaf.value in _get_token_collection( + self.version + ).always_break_tokens + ): + message = "f-string: " + message + else: + if len(match.group(1)) == 1: + message = 'EOL while scanning string literal' + else: + message = 'EOF while scanning triple-quoted string literal' + self._add_syntax_error(leaf, message) + return '' + elif leaf.value == ':': + parent = leaf.parent + if parent.type in ('classdef', 'funcdef'): + self.context = self.context.add_context(parent) + + # The rest is rule based. + return super().visit_leaf(leaf) + + def _add_indentation_error(self, spacing, message): + self.add_issue(spacing, 903, "IndentationError: " + message) + + def _add_syntax_error(self, node, message): + self.add_issue(node, 901, "SyntaxError: " + message) + + def add_issue(self, node, code, message): + # Overwrite the default behavior. + # Check if the issues are on the same line. 
+ line = node.start_pos[0] + args = (code, message, node) + self._error_dict.setdefault(line, args) + + def finalize(self): + self.context.finalize() + + for code, message, node in self._error_dict.values(): + self.issues.append(Issue(node, code, message)) + + +class IndentationRule(Rule): + code = 903 + + def _get_message(self, message, node): + message = super()._get_message(message, node) + return "IndentationError: " + message + + +@ErrorFinder.register_rule(type='error_node') +class _ExpectIndentedBlock(IndentationRule): + message = 'expected an indented block' + + def get_node(self, node): + leaf = node.get_next_leaf() + return list(leaf._split_prefix())[-1] + + def is_issue(self, node): + # This is the beginning of a suite that is not indented. + return node.children[-1].type == 'newline' + + +class ErrorFinderConfig(NormalizerConfig): + normalizer_class = ErrorFinder + + +class SyntaxRule(Rule): + code = 901 + + def _get_message(self, message, node): + message = super()._get_message(message, node) + if ( + "f-string" not in message + and _any_fstring_error(self._normalizer.version, node) + ): + message = "f-string: " + message + return "SyntaxError: " + message + + +@ErrorFinder.register_rule(type='error_node') +class _InvalidSyntaxRule(SyntaxRule): + message = "invalid syntax" + fstring_message = "f-string: invalid syntax" + + def get_node(self, node): + return node.get_next_leaf() + + def is_issue(self, node): + error = node.get_next_leaf().type != 'error_leaf' + if ( + error + and _any_fstring_error(self._normalizer.version, node) + ): + self.add_issue(node, message=self.fstring_message) + else: + # Error leafs will be added later as an error. + return error + + +@ErrorFinder.register_rule(value='await') +class _AwaitOutsideAsync(SyntaxRule): + message = "'await' outside async function" + + def is_issue(self, leaf): + return not self._normalizer.context.is_async_funcdef() + + def get_error_node(self, node): + # Return the whole await statement. 
+ return node.parent + + +@ErrorFinder.register_rule(value='break') +class _BreakOutsideLoop(SyntaxRule): + message = "'break' outside loop" + + def is_issue(self, leaf): + in_loop = False + for block in self._normalizer.context.blocks: + if block.type in ('for_stmt', 'while_stmt'): + in_loop = True + return not in_loop + + +@ErrorFinder.register_rule(value='continue') +class _ContinueChecks(SyntaxRule): + message = "'continue' not properly in loop" + message_in_finally = "'continue' not supported inside 'finally' clause" + + def is_issue(self, leaf): + in_loop = False + for block in self._normalizer.context.blocks: + if block.type in ('for_stmt', 'while_stmt'): + in_loop = True + if block.type == 'try_stmt': + last_block = block.children[-3] + if ( + last_block == "finally" + and leaf.start_pos > last_block.start_pos + and self._normalizer.version < (3, 8) + ): + self.add_issue(leaf, message=self.message_in_finally) + return False # Error already added + if not in_loop: + return True + + +@ErrorFinder.register_rule(value='from') +class _YieldFromCheck(SyntaxRule): + message = "'yield from' inside async function" + + def get_node(self, leaf): + return leaf.parent.parent # This is the actual yield statement. + + def is_issue(self, leaf): + return leaf.parent.type == 'yield_arg' \ + and self._normalizer.context.is_async_funcdef() + + +@ErrorFinder.register_rule(type='name') +class _NameChecks(SyntaxRule): + message = 'cannot assign to __debug__' + message_none = 'cannot assign to None' + + def is_issue(self, leaf): + self._normalizer.context.add_name(leaf) + + if leaf.value == '__debug__' and leaf.is_definition(): + return True + + +@ErrorFinder.register_rule(type='string') +class _StringChecks(SyntaxRule): + message = "bytes can only contain ASCII literal characters." 
+ + def is_issue(self, leaf): + string_prefix = leaf.string_prefix.lower() + if 'b' in string_prefix \ + and any(c for c in leaf.value if ord(c) > 127): + # b'ä' + return True + + if 'r' not in string_prefix: + # Raw strings don't need to be checked if they have proper + # escaping. + + payload = leaf._get_payload() + if 'b' in string_prefix: + payload = payload.encode('utf-8') + func = codecs.escape_decode + else: + func = codecs.unicode_escape_decode + + try: + with warnings.catch_warnings(): + # The warnings from parsing strings are not relevant. + warnings.filterwarnings('ignore') + func(payload) + except UnicodeDecodeError as e: + self.add_issue(leaf, message='(unicode error) ' + str(e)) + except ValueError as e: + self.add_issue(leaf, message='(value error) ' + str(e)) + + +@ErrorFinder.register_rule(value='*') +class _StarCheck(SyntaxRule): + message = "named arguments must follow bare *" + + def is_issue(self, leaf): + params = leaf.parent + if params.type == 'parameters' and params: + after = params.children[params.children.index(leaf) + 1:] + after = [child for child in after + if child not in (',', ')') and not child.star_count] + return len(after) == 0 + + +@ErrorFinder.register_rule(value='**') +class _StarStarCheck(SyntaxRule): + # e.g. {**{} for a in [1]} + # TODO this should probably get a better end_pos including + # the next sibling of leaf. 
+ message = "dict unpacking cannot be used in dict comprehension" + + def is_issue(self, leaf): + if leaf.parent.type == 'dictorsetmaker': + comp_for = leaf.get_next_sibling().get_next_sibling() + return comp_for is not None and comp_for.type in _COMP_FOR_TYPES + + +@ErrorFinder.register_rule(value='yield') +@ErrorFinder.register_rule(value='return') +class _ReturnAndYieldChecks(SyntaxRule): + message = "'return' with value in async generator" + message_async_yield = "'yield' inside async function" + + def get_node(self, leaf): + return leaf.parent + + def is_issue(self, leaf): + if self._normalizer.context.node.type != 'funcdef': + self.add_issue(self.get_node(leaf), message="'%s' outside function" % leaf.value) + elif self._normalizer.context.is_async_funcdef() \ + and any(self._normalizer.context.node.iter_yield_exprs()): + if leaf.value == 'return' and leaf.parent.type == 'return_stmt': + return True + + +@ErrorFinder.register_rule(type='strings') +class _BytesAndStringMix(SyntaxRule): + # e.g. 's' b'' + message = "cannot mix bytes and nonbytes literals" + + def _is_bytes_literal(self, string): + if string.type == 'fstring': + return False + return 'b' in string.string_prefix.lower() + + def is_issue(self, node): + first = node.children[0] + first_is_bytes = self._is_bytes_literal(first) + for string in node.children[1:]: + if first_is_bytes != self._is_bytes_literal(string): + return True + + +@ErrorFinder.register_rule(type='import_as_names') +class _TrailingImportComma(SyntaxRule): + # e.g. 
from foo import a, + message = "trailing comma not allowed without surrounding parentheses" + + def is_issue(self, node): + if node.children[-1] == ',' and node.parent.children[-1] != ')': + return True + + +@ErrorFinder.register_rule(type='import_from') +class _ImportStarInFunction(SyntaxRule): + message = "import * only allowed at module level" + + def is_issue(self, node): + return node.is_star_import() and self._normalizer.context.parent_context is not None + + +@ErrorFinder.register_rule(type='import_from') +class _FutureImportRule(SyntaxRule): + message = "from __future__ imports must occur at the beginning of the file" + + def is_issue(self, node): + if _is_future_import(node): + if not _is_future_import_first(node): + return True + + for from_name, future_name in node.get_paths(): + name = future_name.value + allowed_futures = list(ALLOWED_FUTURES) + if self._normalizer.version >= (3, 7): + allowed_futures.append('annotations') + if name == 'braces': + self.add_issue(node, message="not a chance") + elif name == 'barry_as_FLUFL': + m = "Seriously I'm not implementing this :) ~ Dave" + self.add_issue(node, message=m) + elif name not in allowed_futures: + message = "future feature %s is not defined" % name + self.add_issue(node, message=message) + + +@ErrorFinder.register_rule(type='star_expr') +class _StarExprRule(SyntaxRule): + message_iterable_unpacking = "iterable unpacking cannot be used in comprehension" + + def is_issue(self, node): + def check_delete_starred(node): + while node.parent is not None: + node = node.parent + if node.type == 'del_stmt': + return True + if node.type not in (*_STAR_EXPR_PARENTS, 'atom'): + return False + return False + + if self._normalizer.version >= (3, 9): + ancestor = node.parent + else: + ancestor = _skip_parens_bottom_up(node) + # starred expression not in tuple/list/set + if ancestor.type not in (*_STAR_EXPR_PARENTS, 'dictorsetmaker') \ + and not (ancestor.type == 'atom' and ancestor.children[0] != '('): + 
self.add_issue(node, message="can't use starred expression here") + return + + if check_delete_starred(node): + if self._normalizer.version >= (3, 9): + self.add_issue(node, message="cannot delete starred") + else: + self.add_issue(node, message="can't use starred expression here") + return + + if node.parent.type == 'testlist_comp': + # [*[] for a in [1]] + if node.parent.children[1].type in _COMP_FOR_TYPES: + self.add_issue(node, message=self.message_iterable_unpacking) + + +@ErrorFinder.register_rule(types=_STAR_EXPR_PARENTS) +class _StarExprParentRule(SyntaxRule): + def is_issue(self, node): + def is_definition(node, ancestor): + if ancestor is None: + return False + + type_ = ancestor.type + if type_ == 'trailer': + return False + + if type_ == 'expr_stmt': + return node.start_pos < ancestor.children[-1].start_pos + + return is_definition(node, ancestor.parent) + + if is_definition(node, node.parent): + args = [c for c in node.children if c != ','] + starred = [c for c in args if c.type == 'star_expr'] + if len(starred) > 1: + if self._normalizer.version < (3, 9): + message = "two starred expressions in assignment" + else: + message = "multiple starred expressions in assignment" + self.add_issue(starred[1], message=message) + elif starred: + count = args.index(starred[0]) + if count >= 256: + message = "too many expressions in star-unpacking assignment" + self.add_issue(starred[0], message=message) + + +@ErrorFinder.register_rule(type='annassign') +class _AnnotatorRule(SyntaxRule): + # True: int + # {}: float + message = "illegal target for annotation" + + def get_node(self, node): + return node.parent + + def is_issue(self, node): + type_ = None + lhs = node.parent.children[0] + lhs = _remove_parens(lhs) + try: + children = lhs.children + except AttributeError: + pass + else: + if ',' in children or lhs.type == 'atom' and children[0] == '(': + type_ = 'tuple' + elif lhs.type == 'atom' and children[0] == '[': + type_ = 'list' + trailer = children[-1] + + if 
type_ is None: + if not (lhs.type == 'name' + # subscript/attributes are allowed + or lhs.type in ('atom_expr', 'power') + and trailer.type == 'trailer' + and trailer.children[0] != '('): + return True + else: + # x, y: str + message = "only single target (not %s) can be annotated" + self.add_issue(lhs.parent, message=message % type_) + + +@ErrorFinder.register_rule(type='argument') +class _ArgumentRule(SyntaxRule): + def is_issue(self, node): + first = node.children[0] + if self._normalizer.version < (3, 8): + # a((b)=c) is valid in <3.8 + first = _remove_parens(first) + if node.children[1] == '=' and first.type != 'name': + if first.type == 'lambdef': + # f(lambda: 1=1) + if self._normalizer.version < (3, 8): + message = "lambda cannot contain assignment" + else: + message = 'expression cannot contain assignment, perhaps you meant "=="?' + else: + # f(+x=1) + if self._normalizer.version < (3, 8): + message = "keyword can't be an expression" + else: + message = 'expression cannot contain assignment, perhaps you meant "=="?' 
+ self.add_issue(first, message=message) + + if _is_argument_comprehension(node) and node.parent.type == 'classdef': + self.add_issue(node, message='invalid syntax') + + +@ErrorFinder.register_rule(type='nonlocal_stmt') +class _NonlocalModuleLevelRule(SyntaxRule): + message = "nonlocal declaration not allowed at module level" + + def is_issue(self, node): + return self._normalizer.context.parent_context is None + + +@ErrorFinder.register_rule(type='arglist') +class _ArglistRule(SyntaxRule): + @property + def message(self): + if self._normalizer.version < (3, 7): + return "Generator expression must be parenthesized if not sole argument" + else: + return "Generator expression must be parenthesized" + + def is_issue(self, node): + arg_set = set() + kw_only = False + kw_unpacking_only = False + for argument in node.children: + if argument == ',': + continue + + if argument.type == 'argument': + first = argument.children[0] + if _is_argument_comprehension(argument) and len(node.children) >= 2: + # a(a, b for b in c) + return True + + if first in ('*', '**'): + if first == '*': + if kw_unpacking_only: + # foo(**kwargs, *args) + message = "iterable argument unpacking " \ + "follows keyword argument unpacking" + self.add_issue(argument, message=message) + else: + kw_unpacking_only = True + else: # Is a keyword argument. 
+ kw_only = True + if first.type == 'name': + if first.value in arg_set: + # f(x=1, x=2) + message = "keyword argument repeated" + if self._normalizer.version >= (3, 9): + message += ": {}".format(first.value) + self.add_issue(first, message=message) + else: + arg_set.add(first.value) + else: + if kw_unpacking_only: + # f(**x, y) + message = "positional argument follows keyword argument unpacking" + self.add_issue(argument, message=message) + elif kw_only: + # f(x=2, y) + message = "positional argument follows keyword argument" + self.add_issue(argument, message=message) + + +@ErrorFinder.register_rule(type='parameters') +@ErrorFinder.register_rule(type='lambdef') +class _ParameterRule(SyntaxRule): + # def f(x=3, y): pass + message = "non-default argument follows default argument" + + def is_issue(self, node): + param_names = set() + default_only = False + star_seen = False + for p in _iter_params(node): + if p.type == 'operator': + if p.value == '*': + star_seen = True + default_only = False + continue + + if p.name.value in param_names: + message = "duplicate argument '%s' in function definition" + self.add_issue(p.name, message=message % p.name.value) + param_names.add(p.name.value) + + if not star_seen: + if p.default is None and not p.star_count: + if default_only: + return True + elif p.star_count: + star_seen = True + default_only = False + else: + default_only = True + + +@ErrorFinder.register_rule(type='try_stmt') +class _TryStmtRule(SyntaxRule): + message = "default 'except:' must be last" + + def is_issue(self, try_stmt): + default_except = None + for except_clause in try_stmt.children[3::3]: + if except_clause in ('else', 'finally'): + break + if except_clause == 'except': + default_except = except_clause + elif default_except is not None: + self.add_issue(default_except, message=self.message) + + +@ErrorFinder.register_rule(type='fstring') +class _FStringRule(SyntaxRule): + _fstring_grammar = None + message_expr = "f-string expression part cannot 
include a backslash" + message_nested = "f-string: expressions nested too deeply" + message_conversion = "f-string: invalid conversion character: expected 's', 'r', or 'a'" + + def _check_format_spec(self, format_spec, depth): + self._check_fstring_contents(format_spec.children[1:], depth) + + def _check_fstring_expr(self, fstring_expr, depth): + if depth >= 2: + self.add_issue(fstring_expr, message=self.message_nested) + + expr = fstring_expr.children[1] + if '\\' in expr.get_code(): + self.add_issue(expr, message=self.message_expr) + + children_2 = fstring_expr.children[2] + if children_2.type == 'operator' and children_2.value == '=': + conversion = fstring_expr.children[3] + else: + conversion = children_2 + if conversion.type == 'fstring_conversion': + name = conversion.children[1] + if name.value not in ('s', 'r', 'a'): + self.add_issue(name, message=self.message_conversion) + + format_spec = fstring_expr.children[-2] + if format_spec.type == 'fstring_format_spec': + self._check_format_spec(format_spec, depth + 1) + + def is_issue(self, fstring): + self._check_fstring_contents(fstring.children[1:-1]) + + def _check_fstring_contents(self, children, depth=0): + for fstring_content in children: + if fstring_content.type == 'fstring_expr': + self._check_fstring_expr(fstring_content, depth) + + +class _CheckAssignmentRule(SyntaxRule): + def _check_assignment(self, node, is_deletion=False, is_namedexpr=False, is_aug_assign=False): + error = None + type_ = node.type + if type_ == 'lambdef': + error = 'lambda' + elif type_ == 'atom': + first, second = node.children[:2] + error = _get_comprehension_type(node) + if error is None: + if second.type == 'dictorsetmaker': + if self._normalizer.version < (3, 8): + error = 'literal' + else: + if second.children[1] == ':': + error = 'dict display' + else: + error = 'set display' + elif first == "{" and second == "}": + if self._normalizer.version < (3, 8): + error = 'literal' + else: + error = "dict display" + elif first == 
"{" and len(node.children) > 2: + if self._normalizer.version < (3, 8): + error = 'literal' + else: + error = "set display" + elif first in ('(', '['): + if second.type == 'yield_expr': + error = 'yield expression' + elif second.type == 'testlist_comp': + # ([a, b] := [1, 2]) + # ((a, b) := [1, 2]) + if is_namedexpr: + if first == '(': + error = 'tuple' + elif first == '[': + error = 'list' + + # This is not a comprehension, they were handled + # further above. + for child in second.children[::2]: + self._check_assignment(child, is_deletion, is_namedexpr, is_aug_assign) + else: # Everything handled, must be useless brackets. + self._check_assignment(second, is_deletion, is_namedexpr, is_aug_assign) + elif type_ == 'keyword': + if node.value == "yield": + error = "yield expression" + elif self._normalizer.version < (3, 8): + error = 'keyword' + else: + error = str(node.value) + elif type_ == 'operator': + if node.value == '...': + error = 'Ellipsis' + elif type_ == 'comparison': + error = 'comparison' + elif type_ in ('string', 'number', 'strings'): + error = 'literal' + elif type_ == 'yield_expr': + # This one seems to be a slightly different warning in Python. 
+ message = 'assignment to yield expression not possible' + self.add_issue(node, message=message) + elif type_ == 'test': + error = 'conditional expression' + elif type_ in ('atom_expr', 'power'): + if node.children[0] == 'await': + error = 'await expression' + elif node.children[-2] == '**': + error = 'operator' + else: + # Has a trailer + trailer = node.children[-1] + assert trailer.type == 'trailer' + if trailer.children[0] == '(': + error = 'function call' + elif is_namedexpr and trailer.children[0] == '[': + error = 'subscript' + elif is_namedexpr and trailer.children[0] == '.': + error = 'attribute' + elif type_ == "fstring": + if self._normalizer.version < (3, 8): + error = 'literal' + else: + error = "f-string expression" + elif type_ in ('testlist_star_expr', 'exprlist', 'testlist'): + for child in node.children[::2]: + self._check_assignment(child, is_deletion, is_namedexpr, is_aug_assign) + elif ('expr' in type_ and type_ != 'star_expr' # is a substring + or '_test' in type_ + or type_ in ('term', 'factor')): + error = 'operator' + elif type_ == "star_expr": + if is_deletion: + if self._normalizer.version >= (3, 9): + error = "starred" + else: + self.add_issue(node, message="can't use starred expression here") + else: + if self._normalizer.version >= (3, 9): + ancestor = node.parent + else: + ancestor = _skip_parens_bottom_up(node) + if ancestor.type not in _STAR_EXPR_PARENTS and not is_aug_assign \ + and not (ancestor.type == 'atom' and ancestor.children[0] == '['): + message = "starred assignment target must be in a list or tuple" + self.add_issue(node, message=message) + + self._check_assignment(node.children[1]) + + if error is not None: + if is_namedexpr: + message = 'cannot use assignment expressions with %s' % error + else: + cannot = "can't" if self._normalizer.version < (3, 8) else "cannot" + message = ' '.join([cannot, "delete" if is_deletion else "assign to", error]) + self.add_issue(node, message=message) + + 
+@ErrorFinder.register_rule(type='sync_comp_for') +class _CompForRule(_CheckAssignmentRule): + message = "asynchronous comprehension outside of an asynchronous function" + + def is_issue(self, node): + expr_list = node.children[1] + if expr_list.type != 'expr_list': # Already handled. + self._check_assignment(expr_list) + + return node.parent.children[0] == 'async' \ + and not self._normalizer.context.is_async_funcdef() + + +@ErrorFinder.register_rule(type='expr_stmt') +class _ExprStmtRule(_CheckAssignmentRule): + message = "illegal expression for augmented assignment" + extended_message = "'{target}' is an " + message + + def is_issue(self, node): + augassign = node.children[1] + is_aug_assign = augassign != '=' and augassign.type != 'annassign' + + if self._normalizer.version <= (3, 8) or not is_aug_assign: + for before_equal in node.children[:-2:2]: + self._check_assignment(before_equal, is_aug_assign=is_aug_assign) + + if is_aug_assign: + target = _remove_parens(node.children[0]) + # a, a[b], a.b + + if target.type == "name" or ( + target.type in ("atom_expr", "power") + and target.children[1].type == "trailer" + and target.children[-1].children[0] != "(" + ): + return False + + if self._normalizer.version <= (3, 8): + return True + else: + self.add_issue( + node, + message=self.extended_message.format( + target=_get_rhs_name(node.children[0], self._normalizer.version) + ), + ) + + +@ErrorFinder.register_rule(type='with_item') +class _WithItemRule(_CheckAssignmentRule): + def is_issue(self, with_item): + self._check_assignment(with_item.children[2]) + + +@ErrorFinder.register_rule(type='del_stmt') +class _DelStmtRule(_CheckAssignmentRule): + def is_issue(self, del_stmt): + child = del_stmt.children[1] + + if child.type != 'expr_list': # Already handled. 
+ self._check_assignment(child, is_deletion=True) + + +@ErrorFinder.register_rule(type='expr_list') +class _ExprListRule(_CheckAssignmentRule): + def is_issue(self, expr_list): + for expr in expr_list.children[::2]: + self._check_assignment(expr) + + +@ErrorFinder.register_rule(type='for_stmt') +class _ForStmtRule(_CheckAssignmentRule): + def is_issue(self, for_stmt): + # Some of the nodes here are already used, so no else if + expr_list = for_stmt.children[1] + if expr_list.type != 'expr_list': # Already handled. + self._check_assignment(expr_list) + + +@ErrorFinder.register_rule(type='namedexpr_test') +class _NamedExprRule(_CheckAssignmentRule): + # namedexpr_test: test [':=' test] + + def is_issue(self, namedexpr_test): + # assigned name + first = namedexpr_test.children[0] + + def search_namedexpr_in_comp_for(node): + while True: + parent = node.parent + if parent is None: + return parent + if parent.type == 'sync_comp_for' and parent.children[3] == node: + return parent + node = parent + + if search_namedexpr_in_comp_for(namedexpr_test): + # [i+1 for i in (i := range(5))] + # [i+1 for i in (j := range(5))] + # [i+1 for i in (lambda: (j := range(5)))()] + message = 'assignment expression cannot be used in a comprehension iterable expression' + self.add_issue(namedexpr_test, message=message) + + # defined names + exprlist = list() + + def process_comp_for(comp_for): + if comp_for.type == 'sync_comp_for': + comp = comp_for + elif comp_for.type == 'comp_for': + comp = comp_for.children[1] + exprlist.extend(_get_for_stmt_definition_exprs(comp)) + + def search_all_comp_ancestors(node): + has_ancestors = False + while True: + node = node.search_ancestor('testlist_comp', 'dictorsetmaker') + if node is None: + break + for child in node.children: + if child.type in _COMP_FOR_TYPES: + process_comp_for(child) + has_ancestors = True + break + return has_ancestors + + # check assignment expressions in comprehensions + search_all = search_all_comp_ancestors(namedexpr_test) + 
if search_all: + if self._normalizer.context.node.type == 'classdef': + message = 'assignment expression within a comprehension ' \ + 'cannot be used in a class body' + self.add_issue(namedexpr_test, message=message) + + namelist = [expr.value for expr in exprlist if expr.type == 'name'] + if first.type == 'name' and first.value in namelist: + # [i := 0 for i, j in range(5)] + # [[(i := i) for j in range(5)] for i in range(5)] + # [i for i, j in range(5) if True or (i := 1)] + # [False and (i := 0) for i, j in range(5)] + message = 'assignment expression cannot rebind ' \ + 'comprehension iteration variable %r' % first.value + self.add_issue(namedexpr_test, message=message) + + self._check_assignment(first, is_namedexpr=True) diff --git a/bundle/jedi-vim/pythonx/parso/parso/python/grammar310.txt b/bundle/jedi-vim/pythonx/parso/parso/python/grammar310.txt new file mode 100644 index 000000000..f092050d8 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/parso/python/grammar310.txt @@ -0,0 +1,169 @@ +# Grammar for Python + +# NOTE WELL: You should also follow all the steps listed at +# https://devguide.python.org/grammar/ + +# Start symbols for the grammar: +# single_input is a single interactive statement; +# file_input is a module or sequence of commands read from an input file; +# eval_input is the input for the eval() functions. +# NB: compound_stmt in single_input is followed by extra NEWLINE! 
+single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE +file_input: stmt* ENDMARKER +eval_input: testlist NEWLINE* ENDMARKER + +decorator: '@' namedexpr_test NEWLINE +decorators: decorator+ +decorated: decorators (classdef | funcdef | async_funcdef) + +async_funcdef: 'async' funcdef +funcdef: 'def' NAME parameters ['->' test] ':' suite + +parameters: '(' [typedargslist] ')' +typedargslist: ( + (tfpdef ['=' test] (',' tfpdef ['=' test])* ',' '/' [',' [ tfpdef ['=' test] ( + ',' tfpdef ['=' test])* ([',' [ + '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] + | '**' tfpdef [',']]]) + | '*' [tfpdef] (',' tfpdef ['=' test])* ([',' ['**' tfpdef [',']]]) + | '**' tfpdef [',']]] ) +| (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [ + '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] + | '**' tfpdef [',']]] + | '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] + | '**' tfpdef [',']) +) +tfpdef: NAME [':' test] +varargslist: vfpdef ['=' test ](',' vfpdef ['=' test])* ',' '/' [',' [ (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [ + '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] + | '**' vfpdef [',']]] + | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] + | '**' vfpdef [',']) ]] | (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [ + '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] + | '**' vfpdef [',']]] + | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] + | '**' vfpdef [','] +) +vfpdef: NAME + +stmt: simple_stmt | compound_stmt | NEWLINE +simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE +small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt | + import_stmt | global_stmt | nonlocal_stmt | assert_stmt) +expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) | + ('=' (yield_expr|testlist_star_expr))*) +annassign: ':' test ['=' (yield_expr|testlist_star_expr)] +testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [','] 
+augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | + '<<=' | '>>=' | '**=' | '//=') +# For normal and annotated assignments, additional restrictions enforced by the interpreter +del_stmt: 'del' exprlist +pass_stmt: 'pass' +flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt +break_stmt: 'break' +continue_stmt: 'continue' +return_stmt: 'return' [testlist_star_expr] +yield_stmt: yield_expr +raise_stmt: 'raise' [test ['from' test]] +import_stmt: import_name | import_from +import_name: 'import' dotted_as_names +# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS +import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+) + 'import' ('*' | '(' import_as_names ')' | import_as_names)) +import_as_name: NAME ['as' NAME] +dotted_as_name: dotted_name ['as' NAME] +import_as_names: import_as_name (',' import_as_name)* [','] +dotted_as_names: dotted_as_name (',' dotted_as_name)* +dotted_name: NAME ('.' NAME)* +global_stmt: 'global' NAME (',' NAME)* +nonlocal_stmt: 'nonlocal' NAME (',' NAME)* +assert_stmt: 'assert' test [',' test] + +compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt +async_stmt: 'async' (funcdef | with_stmt | for_stmt) +if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* ['else' ':' suite] +while_stmt: 'while' namedexpr_test ':' suite ['else' ':' suite] +for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] +try_stmt: ('try' ':' suite + ((except_clause ':' suite)+ + ['else' ':' suite] + ['finally' ':' suite] | + 'finally' ':' suite)) +with_stmt: 'with' with_item (',' with_item)* ':' suite +with_item: test ['as' expr] +# NB compile.c makes sure that the default except clause is last +except_clause: 'except' [test ['as' NAME]] +suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT + +namedexpr_test: test [':=' test] +test: or_test ['if' or_test 'else' test] | lambdef +lambdef: 
'lambda' [varargslist] ':' test +or_test: and_test ('or' and_test)* +and_test: not_test ('and' not_test)* +not_test: 'not' not_test | comparison +comparison: expr (comp_op expr)* +# <> isn't actually a valid comparison operator in Python. It's here for the +# sake of a __future__ import described in PEP 401 (which really works :-) +comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not' +star_expr: '*' expr +expr: xor_expr ('|' xor_expr)* +xor_expr: and_expr ('^' and_expr)* +and_expr: shift_expr ('&' shift_expr)* +shift_expr: arith_expr (('<<'|'>>') arith_expr)* +arith_expr: term (('+'|'-') term)* +term: factor (('*'|'@'|'/'|'%'|'//') factor)* +factor: ('+'|'-'|'~') factor | power +power: atom_expr ['**' factor] +atom_expr: ['await'] atom trailer* +atom: ('(' [yield_expr|testlist_comp] ')' | + '[' [testlist_comp] ']' | + '{' [dictorsetmaker] '}' | + NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False') +testlist_comp: (namedexpr_test|star_expr) ( comp_for | (',' (namedexpr_test|star_expr))* [','] ) +trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME +subscriptlist: subscript (',' subscript)* [','] +subscript: test [':=' test] | [test] ':' [test] [sliceop] +sliceop: ':' [test] +exprlist: (expr|star_expr) (',' (expr|star_expr))* [','] +testlist: test (',' test)* [','] +dictorsetmaker: ( ((test ':' test | '**' expr) + (comp_for | (',' (test ':' test | '**' expr))* [','])) | + ((test [':=' test] | star_expr) + (comp_for | (',' (test [':=' test] | star_expr))* [','])) ) + +classdef: 'class' NAME ['(' [arglist] ')'] ':' suite + +arglist: argument (',' argument)* [','] + +# The reason that keywords are test nodes instead of NAME is that using NAME +# results in an ambiguity. ast.c makes sure it's a NAME. +# "test '=' test" is really "keyword '=' test", but we have no such token. +# These need to be in a single rule to avoid grammar that is ambiguous +# to our LL(1) parser. 
Even though 'test' includes '*expr' in star_expr, +# we explicitly match '*' here, too, to give it proper precedence. +# Illegal combinations and orderings are blocked in ast.c: +# multiple (test comp_for) arguments are blocked; keyword unpackings +# that precede iterable unpackings are blocked; etc. +argument: ( test [comp_for] | + test ':=' test | + test '=' test | + '**' test | + '*' test ) + +comp_iter: comp_for | comp_if +sync_comp_for: 'for' exprlist 'in' or_test [comp_iter] +comp_for: ['async'] sync_comp_for +comp_if: 'if' or_test [comp_iter] + +# not used in grammar, but may appear in "node" passed from Parser to Compiler +encoding_decl: NAME + +yield_expr: 'yield' [yield_arg] +yield_arg: 'from' test | testlist_star_expr + +strings: (STRING | fstring)+ +fstring: FSTRING_START fstring_content* FSTRING_END +fstring_content: FSTRING_STRING | fstring_expr +fstring_conversion: '!' NAME +fstring_expr: '{' (testlist_comp | yield_expr) ['='] [ fstring_conversion ] [ fstring_format_spec ] '}' +fstring_format_spec: ':' fstring_content* diff --git a/bundle/jedi-vim/pythonx/parso/parso/python/grammar311.txt b/bundle/jedi-vim/pythonx/parso/parso/python/grammar311.txt new file mode 100644 index 000000000..f092050d8 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/parso/python/grammar311.txt @@ -0,0 +1,169 @@ +# Grammar for Python + +# NOTE WELL: You should also follow all the steps listed at +# https://devguide.python.org/grammar/ + +# Start symbols for the grammar: +# single_input is a single interactive statement; +# file_input is a module or sequence of commands read from an input file; +# eval_input is the input for the eval() functions. +# NB: compound_stmt in single_input is followed by extra NEWLINE! 
+single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE +file_input: stmt* ENDMARKER +eval_input: testlist NEWLINE* ENDMARKER + +decorator: '@' namedexpr_test NEWLINE +decorators: decorator+ +decorated: decorators (classdef | funcdef | async_funcdef) + +async_funcdef: 'async' funcdef +funcdef: 'def' NAME parameters ['->' test] ':' suite + +parameters: '(' [typedargslist] ')' +typedargslist: ( + (tfpdef ['=' test] (',' tfpdef ['=' test])* ',' '/' [',' [ tfpdef ['=' test] ( + ',' tfpdef ['=' test])* ([',' [ + '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] + | '**' tfpdef [',']]]) + | '*' [tfpdef] (',' tfpdef ['=' test])* ([',' ['**' tfpdef [',']]]) + | '**' tfpdef [',']]] ) +| (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [ + '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] + | '**' tfpdef [',']]] + | '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] + | '**' tfpdef [',']) +) +tfpdef: NAME [':' test] +varargslist: vfpdef ['=' test ](',' vfpdef ['=' test])* ',' '/' [',' [ (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [ + '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] + | '**' vfpdef [',']]] + | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] + | '**' vfpdef [',']) ]] | (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [ + '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] + | '**' vfpdef [',']]] + | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] + | '**' vfpdef [','] +) +vfpdef: NAME + +stmt: simple_stmt | compound_stmt | NEWLINE +simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE +small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt | + import_stmt | global_stmt | nonlocal_stmt | assert_stmt) +expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) | + ('=' (yield_expr|testlist_star_expr))*) +annassign: ':' test ['=' (yield_expr|testlist_star_expr)] +testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [','] 
+augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | + '<<=' | '>>=' | '**=' | '//=') +# For normal and annotated assignments, additional restrictions enforced by the interpreter +del_stmt: 'del' exprlist +pass_stmt: 'pass' +flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt +break_stmt: 'break' +continue_stmt: 'continue' +return_stmt: 'return' [testlist_star_expr] +yield_stmt: yield_expr +raise_stmt: 'raise' [test ['from' test]] +import_stmt: import_name | import_from +import_name: 'import' dotted_as_names +# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS +import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+) + 'import' ('*' | '(' import_as_names ')' | import_as_names)) +import_as_name: NAME ['as' NAME] +dotted_as_name: dotted_name ['as' NAME] +import_as_names: import_as_name (',' import_as_name)* [','] +dotted_as_names: dotted_as_name (',' dotted_as_name)* +dotted_name: NAME ('.' NAME)* +global_stmt: 'global' NAME (',' NAME)* +nonlocal_stmt: 'nonlocal' NAME (',' NAME)* +assert_stmt: 'assert' test [',' test] + +compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt +async_stmt: 'async' (funcdef | with_stmt | for_stmt) +if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* ['else' ':' suite] +while_stmt: 'while' namedexpr_test ':' suite ['else' ':' suite] +for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] +try_stmt: ('try' ':' suite + ((except_clause ':' suite)+ + ['else' ':' suite] + ['finally' ':' suite] | + 'finally' ':' suite)) +with_stmt: 'with' with_item (',' with_item)* ':' suite +with_item: test ['as' expr] +# NB compile.c makes sure that the default except clause is last +except_clause: 'except' [test ['as' NAME]] +suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT + +namedexpr_test: test [':=' test] +test: or_test ['if' or_test 'else' test] | lambdef +lambdef: 
'lambda' [varargslist] ':' test +or_test: and_test ('or' and_test)* +and_test: not_test ('and' not_test)* +not_test: 'not' not_test | comparison +comparison: expr (comp_op expr)* +# <> isn't actually a valid comparison operator in Python. It's here for the +# sake of a __future__ import described in PEP 401 (which really works :-) +comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not' +star_expr: '*' expr +expr: xor_expr ('|' xor_expr)* +xor_expr: and_expr ('^' and_expr)* +and_expr: shift_expr ('&' shift_expr)* +shift_expr: arith_expr (('<<'|'>>') arith_expr)* +arith_expr: term (('+'|'-') term)* +term: factor (('*'|'@'|'/'|'%'|'//') factor)* +factor: ('+'|'-'|'~') factor | power +power: atom_expr ['**' factor] +atom_expr: ['await'] atom trailer* +atom: ('(' [yield_expr|testlist_comp] ')' | + '[' [testlist_comp] ']' | + '{' [dictorsetmaker] '}' | + NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False') +testlist_comp: (namedexpr_test|star_expr) ( comp_for | (',' (namedexpr_test|star_expr))* [','] ) +trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME +subscriptlist: subscript (',' subscript)* [','] +subscript: test [':=' test] | [test] ':' [test] [sliceop] +sliceop: ':' [test] +exprlist: (expr|star_expr) (',' (expr|star_expr))* [','] +testlist: test (',' test)* [','] +dictorsetmaker: ( ((test ':' test | '**' expr) + (comp_for | (',' (test ':' test | '**' expr))* [','])) | + ((test [':=' test] | star_expr) + (comp_for | (',' (test [':=' test] | star_expr))* [','])) ) + +classdef: 'class' NAME ['(' [arglist] ')'] ':' suite + +arglist: argument (',' argument)* [','] + +# The reason that keywords are test nodes instead of NAME is that using NAME +# results in an ambiguity. ast.c makes sure it's a NAME. +# "test '=' test" is really "keyword '=' test", but we have no such token. +# These need to be in a single rule to avoid grammar that is ambiguous +# to our LL(1) parser. 
Even though 'test' includes '*expr' in star_expr, +# we explicitly match '*' here, too, to give it proper precedence. +# Illegal combinations and orderings are blocked in ast.c: +# multiple (test comp_for) arguments are blocked; keyword unpackings +# that precede iterable unpackings are blocked; etc. +argument: ( test [comp_for] | + test ':=' test | + test '=' test | + '**' test | + '*' test ) + +comp_iter: comp_for | comp_if +sync_comp_for: 'for' exprlist 'in' or_test [comp_iter] +comp_for: ['async'] sync_comp_for +comp_if: 'if' or_test [comp_iter] + +# not used in grammar, but may appear in "node" passed from Parser to Compiler +encoding_decl: NAME + +yield_expr: 'yield' [yield_arg] +yield_arg: 'from' test | testlist_star_expr + +strings: (STRING | fstring)+ +fstring: FSTRING_START fstring_content* FSTRING_END +fstring_content: FSTRING_STRING | fstring_expr +fstring_conversion: '!' NAME +fstring_expr: '{' (testlist_comp | yield_expr) ['='] [ fstring_conversion ] [ fstring_format_spec ] '}' +fstring_format_spec: ':' fstring_content* diff --git a/bundle/jedi-vim/pythonx/parso/parso/python/grammar312.txt b/bundle/jedi-vim/pythonx/parso/parso/python/grammar312.txt new file mode 100644 index 000000000..f092050d8 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/parso/python/grammar312.txt @@ -0,0 +1,169 @@ +# Grammar for Python + +# NOTE WELL: You should also follow all the steps listed at +# https://devguide.python.org/grammar/ + +# Start symbols for the grammar: +# single_input is a single interactive statement; +# file_input is a module or sequence of commands read from an input file; +# eval_input is the input for the eval() functions. +# NB: compound_stmt in single_input is followed by extra NEWLINE! 
+single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE +file_input: stmt* ENDMARKER +eval_input: testlist NEWLINE* ENDMARKER + +decorator: '@' namedexpr_test NEWLINE +decorators: decorator+ +decorated: decorators (classdef | funcdef | async_funcdef) + +async_funcdef: 'async' funcdef +funcdef: 'def' NAME parameters ['->' test] ':' suite + +parameters: '(' [typedargslist] ')' +typedargslist: ( + (tfpdef ['=' test] (',' tfpdef ['=' test])* ',' '/' [',' [ tfpdef ['=' test] ( + ',' tfpdef ['=' test])* ([',' [ + '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] + | '**' tfpdef [',']]]) + | '*' [tfpdef] (',' tfpdef ['=' test])* ([',' ['**' tfpdef [',']]]) + | '**' tfpdef [',']]] ) +| (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [ + '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] + | '**' tfpdef [',']]] + | '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] + | '**' tfpdef [',']) +) +tfpdef: NAME [':' test] +varargslist: vfpdef ['=' test ](',' vfpdef ['=' test])* ',' '/' [',' [ (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [ + '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] + | '**' vfpdef [',']]] + | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] + | '**' vfpdef [',']) ]] | (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [ + '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] + | '**' vfpdef [',']]] + | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] + | '**' vfpdef [','] +) +vfpdef: NAME + +stmt: simple_stmt | compound_stmt | NEWLINE +simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE +small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt | + import_stmt | global_stmt | nonlocal_stmt | assert_stmt) +expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) | + ('=' (yield_expr|testlist_star_expr))*) +annassign: ':' test ['=' (yield_expr|testlist_star_expr)] +testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [','] 
+augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | + '<<=' | '>>=' | '**=' | '//=') +# For normal and annotated assignments, additional restrictions enforced by the interpreter +del_stmt: 'del' exprlist +pass_stmt: 'pass' +flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt +break_stmt: 'break' +continue_stmt: 'continue' +return_stmt: 'return' [testlist_star_expr] +yield_stmt: yield_expr +raise_stmt: 'raise' [test ['from' test]] +import_stmt: import_name | import_from +import_name: 'import' dotted_as_names +# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS +import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+) + 'import' ('*' | '(' import_as_names ')' | import_as_names)) +import_as_name: NAME ['as' NAME] +dotted_as_name: dotted_name ['as' NAME] +import_as_names: import_as_name (',' import_as_name)* [','] +dotted_as_names: dotted_as_name (',' dotted_as_name)* +dotted_name: NAME ('.' NAME)* +global_stmt: 'global' NAME (',' NAME)* +nonlocal_stmt: 'nonlocal' NAME (',' NAME)* +assert_stmt: 'assert' test [',' test] + +compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt +async_stmt: 'async' (funcdef | with_stmt | for_stmt) +if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* ['else' ':' suite] +while_stmt: 'while' namedexpr_test ':' suite ['else' ':' suite] +for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] +try_stmt: ('try' ':' suite + ((except_clause ':' suite)+ + ['else' ':' suite] + ['finally' ':' suite] | + 'finally' ':' suite)) +with_stmt: 'with' with_item (',' with_item)* ':' suite +with_item: test ['as' expr] +# NB compile.c makes sure that the default except clause is last +except_clause: 'except' [test ['as' NAME]] +suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT + +namedexpr_test: test [':=' test] +test: or_test ['if' or_test 'else' test] | lambdef +lambdef: 
'lambda' [varargslist] ':' test +or_test: and_test ('or' and_test)* +and_test: not_test ('and' not_test)* +not_test: 'not' not_test | comparison +comparison: expr (comp_op expr)* +# <> isn't actually a valid comparison operator in Python. It's here for the +# sake of a __future__ import described in PEP 401 (which really works :-) +comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not' +star_expr: '*' expr +expr: xor_expr ('|' xor_expr)* +xor_expr: and_expr ('^' and_expr)* +and_expr: shift_expr ('&' shift_expr)* +shift_expr: arith_expr (('<<'|'>>') arith_expr)* +arith_expr: term (('+'|'-') term)* +term: factor (('*'|'@'|'/'|'%'|'//') factor)* +factor: ('+'|'-'|'~') factor | power +power: atom_expr ['**' factor] +atom_expr: ['await'] atom trailer* +atom: ('(' [yield_expr|testlist_comp] ')' | + '[' [testlist_comp] ']' | + '{' [dictorsetmaker] '}' | + NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False') +testlist_comp: (namedexpr_test|star_expr) ( comp_for | (',' (namedexpr_test|star_expr))* [','] ) +trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME +subscriptlist: subscript (',' subscript)* [','] +subscript: test [':=' test] | [test] ':' [test] [sliceop] +sliceop: ':' [test] +exprlist: (expr|star_expr) (',' (expr|star_expr))* [','] +testlist: test (',' test)* [','] +dictorsetmaker: ( ((test ':' test | '**' expr) + (comp_for | (',' (test ':' test | '**' expr))* [','])) | + ((test [':=' test] | star_expr) + (comp_for | (',' (test [':=' test] | star_expr))* [','])) ) + +classdef: 'class' NAME ['(' [arglist] ')'] ':' suite + +arglist: argument (',' argument)* [','] + +# The reason that keywords are test nodes instead of NAME is that using NAME +# results in an ambiguity. ast.c makes sure it's a NAME. +# "test '=' test" is really "keyword '=' test", but we have no such token. +# These need to be in a single rule to avoid grammar that is ambiguous +# to our LL(1) parser. 
Even though 'test' includes '*expr' in star_expr, +# we explicitly match '*' here, too, to give it proper precedence. +# Illegal combinations and orderings are blocked in ast.c: +# multiple (test comp_for) arguments are blocked; keyword unpackings +# that precede iterable unpackings are blocked; etc. +argument: ( test [comp_for] | + test ':=' test | + test '=' test | + '**' test | + '*' test ) + +comp_iter: comp_for | comp_if +sync_comp_for: 'for' exprlist 'in' or_test [comp_iter] +comp_for: ['async'] sync_comp_for +comp_if: 'if' or_test [comp_iter] + +# not used in grammar, but may appear in "node" passed from Parser to Compiler +encoding_decl: NAME + +yield_expr: 'yield' [yield_arg] +yield_arg: 'from' test | testlist_star_expr + +strings: (STRING | fstring)+ +fstring: FSTRING_START fstring_content* FSTRING_END +fstring_content: FSTRING_STRING | fstring_expr +fstring_conversion: '!' NAME +fstring_expr: '{' (testlist_comp | yield_expr) ['='] [ fstring_conversion ] [ fstring_format_spec ] '}' +fstring_format_spec: ':' fstring_content* diff --git a/bundle/jedi-vim/pythonx/parso/parso/python/grammar36.txt b/bundle/jedi-vim/pythonx/parso/parso/python/grammar36.txt new file mode 100644 index 000000000..e79620668 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/parso/python/grammar36.txt @@ -0,0 +1,158 @@ +# Grammar for Python + +# NOTE WELL: You should also follow all the steps listed at +# https://docs.python.org/devguide/grammar.html + +# Start symbols for the grammar: +# single_input is a single interactive statement; +# file_input is a module or sequence of commands read from an input file; +# eval_input is the input for the eval() functions. +# NB: compound_stmt in single_input is followed by extra NEWLINE! 
+single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE +file_input: stmt* ENDMARKER +eval_input: testlist NEWLINE* ENDMARKER +decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE +decorators: decorator+ +decorated: decorators (classdef | funcdef | async_funcdef) + +# NOTE: Francisco Souza/Reinoud Elhorst, using ASYNC/'await' keywords instead of +# skipping python3.5+ compatibility, in favour of 3.7 solution +async_funcdef: 'async' funcdef +funcdef: 'def' NAME parameters ['->' test] ':' suite + +parameters: '(' [typedargslist] ')' +typedargslist: (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [ + '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] + | '**' tfpdef [',']]] + | '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] + | '**' tfpdef [',']) +tfpdef: NAME [':' test] +varargslist: (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [ + '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] + | '**' vfpdef [',']]] + | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] + | '**' vfpdef [','] +) +vfpdef: NAME + +stmt: simple_stmt | compound_stmt | NEWLINE +simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE +small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt | + import_stmt | global_stmt | nonlocal_stmt | assert_stmt) +expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) | + ('=' (yield_expr|testlist_star_expr))*) +annassign: ':' test ['=' test] +testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [','] +augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | + '<<=' | '>>=' | '**=' | '//=') +# For normal and annotated assignments, additional restrictions enforced by the interpreter +del_stmt: 'del' exprlist +pass_stmt: 'pass' +flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt +break_stmt: 'break' +continue_stmt: 'continue' +return_stmt: 'return' [testlist] +yield_stmt: yield_expr +raise_stmt: 'raise' [test ['from' test]] 
+import_stmt: import_name | import_from +import_name: 'import' dotted_as_names +# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS +import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+) + 'import' ('*' | '(' import_as_names ')' | import_as_names)) +import_as_name: NAME ['as' NAME] +dotted_as_name: dotted_name ['as' NAME] +import_as_names: import_as_name (',' import_as_name)* [','] +dotted_as_names: dotted_as_name (',' dotted_as_name)* +dotted_name: NAME ('.' NAME)* +global_stmt: 'global' NAME (',' NAME)* +nonlocal_stmt: 'nonlocal' NAME (',' NAME)* +assert_stmt: 'assert' test [',' test] + +compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt +async_stmt: 'async' (funcdef | with_stmt | for_stmt) +if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite] +while_stmt: 'while' test ':' suite ['else' ':' suite] +for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] +try_stmt: ('try' ':' suite + ((except_clause ':' suite)+ + ['else' ':' suite] + ['finally' ':' suite] | + 'finally' ':' suite)) +with_stmt: 'with' with_item (',' with_item)* ':' suite +with_item: test ['as' expr] +# NB compile.c makes sure that the default except clause is last +except_clause: 'except' [test ['as' NAME]] +suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT + +test: or_test ['if' or_test 'else' test] | lambdef +test_nocond: or_test | lambdef_nocond +lambdef: 'lambda' [varargslist] ':' test +lambdef_nocond: 'lambda' [varargslist] ':' test_nocond +or_test: and_test ('or' and_test)* +and_test: not_test ('and' not_test)* +not_test: 'not' not_test | comparison +comparison: expr (comp_op expr)* +# <> isn't actually a valid comparison operator in Python. 
It's here for the +# sake of a __future__ import described in PEP 401 (which really works :-) +comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not' +star_expr: '*' expr +expr: xor_expr ('|' xor_expr)* +xor_expr: and_expr ('^' and_expr)* +and_expr: shift_expr ('&' shift_expr)* +shift_expr: arith_expr (('<<'|'>>') arith_expr)* +arith_expr: term (('+'|'-') term)* +term: factor (('*'|'@'|'/'|'%'|'//') factor)* +factor: ('+'|'-'|'~') factor | power +power: atom_expr ['**' factor] +atom_expr: ['await'] atom trailer* +atom: ('(' [yield_expr|testlist_comp] ')' | + '[' [testlist_comp] ']' | + '{' [dictorsetmaker] '}' | + NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False') +testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] ) +trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME +subscriptlist: subscript (',' subscript)* [','] +subscript: test | [test] ':' [test] [sliceop] +sliceop: ':' [test] +exprlist: (expr|star_expr) (',' (expr|star_expr))* [','] +testlist: test (',' test)* [','] +dictorsetmaker: ( ((test ':' test | '**' expr) + (comp_for | (',' (test ':' test | '**' expr))* [','])) | + ((test | star_expr) + (comp_for | (',' (test | star_expr))* [','])) ) + +classdef: 'class' NAME ['(' [arglist] ')'] ':' suite + +arglist: argument (',' argument)* [','] + +# The reason that keywords are test nodes instead of NAME is that using NAME +# results in an ambiguity. ast.c makes sure it's a NAME. +# "test '=' test" is really "keyword '=' test", but we have no such token. +# These need to be in a single rule to avoid grammar that is ambiguous +# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr, +# we explicitly match '*' here, too, to give it proper precedence. +# Illegal combinations and orderings are blocked in ast.c: +# multiple (test comp_for) arguments are blocked; keyword unpackings +# that precede iterable unpackings are blocked; etc. 
+argument: ( test [comp_for] | + test '=' test | + '**' test | + '*' test ) + +comp_iter: comp_for | comp_if +sync_comp_for: 'for' exprlist 'in' or_test [comp_iter] +comp_for: ['async'] sync_comp_for +comp_if: 'if' test_nocond [comp_iter] + +# not used in grammar, but may appear in "node" passed from Parser to Compiler +encoding_decl: NAME + +yield_expr: 'yield' [yield_arg] +yield_arg: 'from' test | testlist + +strings: (STRING | fstring)+ +fstring: FSTRING_START fstring_content* FSTRING_END +fstring_content: FSTRING_STRING | fstring_expr +fstring_conversion: '!' NAME +fstring_expr: '{' (testlist_comp | yield_expr) [ fstring_conversion ] [ fstring_format_spec ] '}' +fstring_format_spec: ':' fstring_content* diff --git a/bundle/jedi-vim/pythonx/parso/parso/python/grammar37.txt b/bundle/jedi-vim/pythonx/parso/parso/python/grammar37.txt new file mode 100644 index 000000000..f4a929fe9 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/parso/python/grammar37.txt @@ -0,0 +1,156 @@ +# Grammar for Python + +# NOTE WELL: You should also follow all the steps listed at +# https://docs.python.org/devguide/grammar.html + +# Start symbols for the grammar: +# single_input is a single interactive statement; +# file_input is a module or sequence of commands read from an input file; +# eval_input is the input for the eval() functions. +# NB: compound_stmt in single_input is followed by extra NEWLINE! 
+single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE +file_input: stmt* ENDMARKER +eval_input: testlist NEWLINE* ENDMARKER +decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE +decorators: decorator+ +decorated: decorators (classdef | funcdef | async_funcdef) + +async_funcdef: 'async' funcdef +funcdef: 'def' NAME parameters ['->' test] ':' suite + +parameters: '(' [typedargslist] ')' +typedargslist: (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [ + '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] + | '**' tfpdef [',']]] + | '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] + | '**' tfpdef [',']) +tfpdef: NAME [':' test] +varargslist: (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [ + '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] + | '**' vfpdef [',']]] + | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] + | '**' vfpdef [','] +) +vfpdef: NAME + +stmt: simple_stmt | compound_stmt | NEWLINE +simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE +small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt | + import_stmt | global_stmt | nonlocal_stmt | assert_stmt) +expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) | + ('=' (yield_expr|testlist_star_expr))*) +annassign: ':' test ['=' test] +testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [','] +augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | + '<<=' | '>>=' | '**=' | '//=') +# For normal and annotated assignments, additional restrictions enforced by the interpreter +del_stmt: 'del' exprlist +pass_stmt: 'pass' +flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt +break_stmt: 'break' +continue_stmt: 'continue' +return_stmt: 'return' [testlist] +yield_stmt: yield_expr +raise_stmt: 'raise' [test ['from' test]] +import_stmt: import_name | import_from +import_name: 'import' dotted_as_names +# note below: the ('.' | '...') is necessary because '...' 
is tokenized as ELLIPSIS +import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+) + 'import' ('*' | '(' import_as_names ')' | import_as_names)) +import_as_name: NAME ['as' NAME] +dotted_as_name: dotted_name ['as' NAME] +import_as_names: import_as_name (',' import_as_name)* [','] +dotted_as_names: dotted_as_name (',' dotted_as_name)* +dotted_name: NAME ('.' NAME)* +global_stmt: 'global' NAME (',' NAME)* +nonlocal_stmt: 'nonlocal' NAME (',' NAME)* +assert_stmt: 'assert' test [',' test] + +compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt +async_stmt: 'async' (funcdef | with_stmt | for_stmt) +if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite] +while_stmt: 'while' test ':' suite ['else' ':' suite] +for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] +try_stmt: ('try' ':' suite + ((except_clause ':' suite)+ + ['else' ':' suite] + ['finally' ':' suite] | + 'finally' ':' suite)) +with_stmt: 'with' with_item (',' with_item)* ':' suite +with_item: test ['as' expr] +# NB compile.c makes sure that the default except clause is last +except_clause: 'except' [test ['as' NAME]] +suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT + +test: or_test ['if' or_test 'else' test] | lambdef +test_nocond: or_test | lambdef_nocond +lambdef: 'lambda' [varargslist] ':' test +lambdef_nocond: 'lambda' [varargslist] ':' test_nocond +or_test: and_test ('or' and_test)* +and_test: not_test ('and' not_test)* +not_test: 'not' not_test | comparison +comparison: expr (comp_op expr)* +# <> isn't actually a valid comparison operator in Python. 
It's here for the +# sake of a __future__ import described in PEP 401 (which really works :-) +comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not' +star_expr: '*' expr +expr: xor_expr ('|' xor_expr)* +xor_expr: and_expr ('^' and_expr)* +and_expr: shift_expr ('&' shift_expr)* +shift_expr: arith_expr (('<<'|'>>') arith_expr)* +arith_expr: term (('+'|'-') term)* +term: factor (('*'|'@'|'/'|'%'|'//') factor)* +factor: ('+'|'-'|'~') factor | power +power: atom_expr ['**' factor] +atom_expr: ['await'] atom trailer* +atom: ('(' [yield_expr|testlist_comp] ')' | + '[' [testlist_comp] ']' | + '{' [dictorsetmaker] '}' | + NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False') +testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] ) +trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME +subscriptlist: subscript (',' subscript)* [','] +subscript: test | [test] ':' [test] [sliceop] +sliceop: ':' [test] +exprlist: (expr|star_expr) (',' (expr|star_expr))* [','] +testlist: test (',' test)* [','] +dictorsetmaker: ( ((test ':' test | '**' expr) + (comp_for | (',' (test ':' test | '**' expr))* [','])) | + ((test | star_expr) + (comp_for | (',' (test | star_expr))* [','])) ) + +classdef: 'class' NAME ['(' [arglist] ')'] ':' suite + +arglist: argument (',' argument)* [','] + +# The reason that keywords are test nodes instead of NAME is that using NAME +# results in an ambiguity. ast.c makes sure it's a NAME. +# "test '=' test" is really "keyword '=' test", but we have no such token. +# These need to be in a single rule to avoid grammar that is ambiguous +# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr, +# we explicitly match '*' here, too, to give it proper precedence. +# Illegal combinations and orderings are blocked in ast.c: +# multiple (test comp_for) arguments are blocked; keyword unpackings +# that precede iterable unpackings are blocked; etc. 
+argument: ( test [comp_for] | + test '=' test | + '**' test | + '*' test ) + +comp_iter: comp_for | comp_if +sync_comp_for: 'for' exprlist 'in' or_test [comp_iter] +comp_for: ['async'] sync_comp_for +comp_if: 'if' test_nocond [comp_iter] + +# not used in grammar, but may appear in "node" passed from Parser to Compiler +encoding_decl: NAME + +yield_expr: 'yield' [yield_arg] +yield_arg: 'from' test | testlist + +strings: (STRING | fstring)+ +fstring: FSTRING_START fstring_content* FSTRING_END +fstring_content: FSTRING_STRING | fstring_expr +fstring_conversion: '!' NAME +fstring_expr: '{' (testlist_comp | yield_expr) [ fstring_conversion ] [ fstring_format_spec ] '}' +fstring_format_spec: ':' fstring_content* diff --git a/bundle/jedi-vim/pythonx/parso/parso/python/grammar38.txt b/bundle/jedi-vim/pythonx/parso/parso/python/grammar38.txt new file mode 100644 index 000000000..7288d556f --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/parso/python/grammar38.txt @@ -0,0 +1,171 @@ +# Grammar for Python + +# NOTE WELL: You should also follow all the steps listed at +# https://devguide.python.org/grammar/ + +# Start symbols for the grammar: +# single_input is a single interactive statement; +# file_input is a module or sequence of commands read from an input file; +# eval_input is the input for the eval() functions. +# NB: compound_stmt in single_input is followed by extra NEWLINE! 
+single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE +file_input: stmt* ENDMARKER +eval_input: testlist NEWLINE* ENDMARKER + +decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE +decorators: decorator+ +decorated: decorators (classdef | funcdef | async_funcdef) + +async_funcdef: 'async' funcdef +funcdef: 'def' NAME parameters ['->' test] ':' suite + +parameters: '(' [typedargslist] ')' +typedargslist: ( + (tfpdef ['=' test] (',' tfpdef ['=' test])* ',' '/' [',' [ tfpdef ['=' test] ( + ',' tfpdef ['=' test])* ([',' [ + '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] + | '**' tfpdef [',']]]) + | '*' [tfpdef] (',' tfpdef ['=' test])* ([',' ['**' tfpdef [',']]]) + | '**' tfpdef [',']]] ) +| (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [ + '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] + | '**' tfpdef [',']]] + | '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] + | '**' tfpdef [',']) +) +tfpdef: NAME [':' test] +varargslist: vfpdef ['=' test ](',' vfpdef ['=' test])* ',' '/' [',' [ (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [ + '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] + | '**' vfpdef [',']]] + | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] + | '**' vfpdef [',']) ]] | (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [ + '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] + | '**' vfpdef [',']]] + | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] + | '**' vfpdef [','] +) +vfpdef: NAME + +stmt: simple_stmt | compound_stmt | NEWLINE +simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE +small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt | + import_stmt | global_stmt | nonlocal_stmt | assert_stmt) +expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) | + ('=' (yield_expr|testlist_star_expr))*) +annassign: ':' test ['=' (yield_expr|testlist_star_expr)] +testlist_star_expr: (test|star_expr) (',' 
(test|star_expr))* [','] +augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | + '<<=' | '>>=' | '**=' | '//=') +# For normal and annotated assignments, additional restrictions enforced by the interpreter +del_stmt: 'del' exprlist +pass_stmt: 'pass' +flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt +break_stmt: 'break' +continue_stmt: 'continue' +return_stmt: 'return' [testlist_star_expr] +yield_stmt: yield_expr +raise_stmt: 'raise' [test ['from' test]] +import_stmt: import_name | import_from +import_name: 'import' dotted_as_names +# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS +import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+) + 'import' ('*' | '(' import_as_names ')' | import_as_names)) +import_as_name: NAME ['as' NAME] +dotted_as_name: dotted_name ['as' NAME] +import_as_names: import_as_name (',' import_as_name)* [','] +dotted_as_names: dotted_as_name (',' dotted_as_name)* +dotted_name: NAME ('.' 
NAME)* +global_stmt: 'global' NAME (',' NAME)* +nonlocal_stmt: 'nonlocal' NAME (',' NAME)* +assert_stmt: 'assert' test [',' test] + +compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt +async_stmt: 'async' (funcdef | with_stmt | for_stmt) +if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* ['else' ':' suite] +while_stmt: 'while' namedexpr_test ':' suite ['else' ':' suite] +for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] +try_stmt: ('try' ':' suite + ((except_clause ':' suite)+ + ['else' ':' suite] + ['finally' ':' suite] | + 'finally' ':' suite)) +with_stmt: 'with' with_item (',' with_item)* ':' suite +with_item: test ['as' expr] +# NB compile.c makes sure that the default except clause is last +except_clause: 'except' [test ['as' NAME]] +suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT + +namedexpr_test: test [':=' test] +test: or_test ['if' or_test 'else' test] | lambdef +test_nocond: or_test | lambdef_nocond +lambdef: 'lambda' [varargslist] ':' test +lambdef_nocond: 'lambda' [varargslist] ':' test_nocond +or_test: and_test ('or' and_test)* +and_test: not_test ('and' not_test)* +not_test: 'not' not_test | comparison +comparison: expr (comp_op expr)* +# <> isn't actually a valid comparison operator in Python. 
It's here for the +# sake of a __future__ import described in PEP 401 (which really works :-) +comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not' +star_expr: '*' expr +expr: xor_expr ('|' xor_expr)* +xor_expr: and_expr ('^' and_expr)* +and_expr: shift_expr ('&' shift_expr)* +shift_expr: arith_expr (('<<'|'>>') arith_expr)* +arith_expr: term (('+'|'-') term)* +term: factor (('*'|'@'|'/'|'%'|'//') factor)* +factor: ('+'|'-'|'~') factor | power +power: atom_expr ['**' factor] +atom_expr: ['await'] atom trailer* +atom: ('(' [yield_expr|testlist_comp] ')' | + '[' [testlist_comp] ']' | + '{' [dictorsetmaker] '}' | + NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False') +testlist_comp: (namedexpr_test|star_expr) ( comp_for | (',' (namedexpr_test|star_expr))* [','] ) +trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME +subscriptlist: subscript (',' subscript)* [','] +subscript: test | [test] ':' [test] [sliceop] +sliceop: ':' [test] +exprlist: (expr|star_expr) (',' (expr|star_expr))* [','] +testlist: test (',' test)* [','] +dictorsetmaker: ( ((test ':' test | '**' expr) + (comp_for | (',' (test ':' test | '**' expr))* [','])) | + ((test | star_expr) + (comp_for | (',' (test | star_expr))* [','])) ) + +classdef: 'class' NAME ['(' [arglist] ')'] ':' suite + +arglist: argument (',' argument)* [','] + +# The reason that keywords are test nodes instead of NAME is that using NAME +# results in an ambiguity. ast.c makes sure it's a NAME. +# "test '=' test" is really "keyword '=' test", but we have no such token. +# These need to be in a single rule to avoid grammar that is ambiguous +# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr, +# we explicitly match '*' here, too, to give it proper precedence. +# Illegal combinations and orderings are blocked in ast.c: +# multiple (test comp_for) arguments are blocked; keyword unpackings +# that precede iterable unpackings are blocked; etc. 
+argument: ( test [comp_for] | + test ':=' test | + test '=' test | + '**' test | + '*' test ) + +comp_iter: comp_for | comp_if +sync_comp_for: 'for' exprlist 'in' or_test [comp_iter] +comp_for: ['async'] sync_comp_for +comp_if: 'if' test_nocond [comp_iter] + +# not used in grammar, but may appear in "node" passed from Parser to Compiler +encoding_decl: NAME + +yield_expr: 'yield' [yield_arg] +yield_arg: 'from' test | testlist_star_expr + +strings: (STRING | fstring)+ +fstring: FSTRING_START fstring_content* FSTRING_END +fstring_content: FSTRING_STRING | fstring_expr +fstring_conversion: '!' NAME +fstring_expr: '{' (testlist_comp | yield_expr) ['='] [ fstring_conversion ] [ fstring_format_spec ] '}' +fstring_format_spec: ':' fstring_content* diff --git a/bundle/jedi-vim/pythonx/parso/parso/python/grammar39.txt b/bundle/jedi-vim/pythonx/parso/parso/python/grammar39.txt new file mode 100644 index 000000000..ae46033cf --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/parso/python/grammar39.txt @@ -0,0 +1,169 @@ +# Grammar for Python + +# NOTE WELL: You should also follow all the steps listed at +# https://devguide.python.org/grammar/ + +# Start symbols for the grammar: +# single_input is a single interactive statement; +# file_input is a module or sequence of commands read from an input file; +# eval_input is the input for the eval() functions. +# NB: compound_stmt in single_input is followed by extra NEWLINE! 
+single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE +file_input: stmt* ENDMARKER +eval_input: testlist NEWLINE* ENDMARKER + +decorator: '@' namedexpr_test NEWLINE +decorators: decorator+ +decorated: decorators (classdef | funcdef | async_funcdef) + +async_funcdef: 'async' funcdef +funcdef: 'def' NAME parameters ['->' test] ':' suite + +parameters: '(' [typedargslist] ')' +typedargslist: ( + (tfpdef ['=' test] (',' tfpdef ['=' test])* ',' '/' [',' [ tfpdef ['=' test] ( + ',' tfpdef ['=' test])* ([',' [ + '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] + | '**' tfpdef [',']]]) + | '*' [tfpdef] (',' tfpdef ['=' test])* ([',' ['**' tfpdef [',']]]) + | '**' tfpdef [',']]] ) +| (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [ + '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] + | '**' tfpdef [',']]] + | '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] + | '**' tfpdef [',']) +) +tfpdef: NAME [':' test] +varargslist: vfpdef ['=' test ](',' vfpdef ['=' test])* ',' '/' [',' [ (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [ + '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] + | '**' vfpdef [',']]] + | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] + | '**' vfpdef [',']) ]] | (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [ + '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] + | '**' vfpdef [',']]] + | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] + | '**' vfpdef [','] +) +vfpdef: NAME + +stmt: simple_stmt | compound_stmt | NEWLINE +simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE +small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt | + import_stmt | global_stmt | nonlocal_stmt | assert_stmt) +expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) | + ('=' (yield_expr|testlist_star_expr))*) +annassign: ':' test ['=' (yield_expr|testlist_star_expr)] +testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [','] 
+augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | + '<<=' | '>>=' | '**=' | '//=') +# For normal and annotated assignments, additional restrictions enforced by the interpreter +del_stmt: 'del' exprlist +pass_stmt: 'pass' +flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt +break_stmt: 'break' +continue_stmt: 'continue' +return_stmt: 'return' [testlist_star_expr] +yield_stmt: yield_expr +raise_stmt: 'raise' [test ['from' test]] +import_stmt: import_name | import_from +import_name: 'import' dotted_as_names +# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS +import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+) + 'import' ('*' | '(' import_as_names ')' | import_as_names)) +import_as_name: NAME ['as' NAME] +dotted_as_name: dotted_name ['as' NAME] +import_as_names: import_as_name (',' import_as_name)* [','] +dotted_as_names: dotted_as_name (',' dotted_as_name)* +dotted_name: NAME ('.' NAME)* +global_stmt: 'global' NAME (',' NAME)* +nonlocal_stmt: 'nonlocal' NAME (',' NAME)* +assert_stmt: 'assert' test [',' test] + +compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt +async_stmt: 'async' (funcdef | with_stmt | for_stmt) +if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* ['else' ':' suite] +while_stmt: 'while' namedexpr_test ':' suite ['else' ':' suite] +for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] +try_stmt: ('try' ':' suite + ((except_clause ':' suite)+ + ['else' ':' suite] + ['finally' ':' suite] | + 'finally' ':' suite)) +with_stmt: 'with' with_item (',' with_item)* ':' suite +with_item: test ['as' expr] +# NB compile.c makes sure that the default except clause is last +except_clause: 'except' [test ['as' NAME]] +suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT + +namedexpr_test: test [':=' test] +test: or_test ['if' or_test 'else' test] | lambdef +lambdef: 
'lambda' [varargslist] ':' test +or_test: and_test ('or' and_test)* +and_test: not_test ('and' not_test)* +not_test: 'not' not_test | comparison +comparison: expr (comp_op expr)* +# <> isn't actually a valid comparison operator in Python. It's here for the +# sake of a __future__ import described in PEP 401 (which really works :-) +comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not' +star_expr: '*' expr +expr: xor_expr ('|' xor_expr)* +xor_expr: and_expr ('^' and_expr)* +and_expr: shift_expr ('&' shift_expr)* +shift_expr: arith_expr (('<<'|'>>') arith_expr)* +arith_expr: term (('+'|'-') term)* +term: factor (('*'|'@'|'/'|'%'|'//') factor)* +factor: ('+'|'-'|'~') factor | power +power: atom_expr ['**' factor] +atom_expr: ['await'] atom trailer* +atom: ('(' [yield_expr|testlist_comp] ')' | + '[' [testlist_comp] ']' | + '{' [dictorsetmaker] '}' | + NAME | NUMBER | strings | '...' | 'None' | 'True' | 'False') +testlist_comp: (namedexpr_test|star_expr) ( comp_for | (',' (namedexpr_test|star_expr))* [','] ) +trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME +subscriptlist: subscript (',' subscript)* [','] +subscript: test | [test] ':' [test] [sliceop] +sliceop: ':' [test] +exprlist: (expr|star_expr) (',' (expr|star_expr))* [','] +testlist: test (',' test)* [','] +dictorsetmaker: ( ((test ':' test | '**' expr) + (comp_for | (',' (test ':' test | '**' expr))* [','])) | + ((test [':=' test] | star_expr) + (comp_for | (',' (test [':=' test] | star_expr))* [','])) ) + +classdef: 'class' NAME ['(' [arglist] ')'] ':' suite + +arglist: argument (',' argument)* [','] + +# The reason that keywords are test nodes instead of NAME is that using NAME +# results in an ambiguity. ast.c makes sure it's a NAME. +# "test '=' test" is really "keyword '=' test", but we have no such token. +# These need to be in a single rule to avoid grammar that is ambiguous +# to our LL(1) parser. 
Even though 'test' includes '*expr' in star_expr, +# we explicitly match '*' here, too, to give it proper precedence. +# Illegal combinations and orderings are blocked in ast.c: +# multiple (test comp_for) arguments are blocked; keyword unpackings +# that precede iterable unpackings are blocked; etc. +argument: ( test [comp_for] | + test ':=' test | + test '=' test | + '**' test | + '*' test ) + +comp_iter: comp_for | comp_if +sync_comp_for: 'for' exprlist 'in' or_test [comp_iter] +comp_for: ['async'] sync_comp_for +comp_if: 'if' or_test [comp_iter] + +# not used in grammar, but may appear in "node" passed from Parser to Compiler +encoding_decl: NAME + +yield_expr: 'yield' [yield_arg] +yield_arg: 'from' test | testlist_star_expr + +strings: (STRING | fstring)+ +fstring: FSTRING_START fstring_content* FSTRING_END +fstring_content: FSTRING_STRING | fstring_expr +fstring_conversion: '!' NAME +fstring_expr: '{' (testlist_comp | yield_expr) ['='] [ fstring_conversion ] [ fstring_format_spec ] '}' +fstring_format_spec: ':' fstring_content* diff --git a/bundle/jedi-vim/pythonx/parso/parso/python/issue_list.txt b/bundle/jedi-vim/pythonx/parso/parso/python/issue_list.txt new file mode 100644 index 000000000..4cd9ebfde --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/parso/python/issue_list.txt @@ -0,0 +1,168 @@ +A list of syntax/indentation errors I've encountered in CPython. 
+ +# Python/compile.c + "'continue' not properly in loop" + "'continue' not supported inside 'finally' clause" # Until loop + "default 'except:' must be last" + "from __future__ imports must occur at the beginning of the file" + "'return' outside function" + "'return' with value in async generator" + "'break' outside loop" + "two starred expressions in assignment" + "asynchronous comprehension outside of an asynchronous function" + "'yield' outside function" # For both yield and yield from + "'yield from' inside async function" + "'await' outside function" + "'await' outside async function" + "starred assignment target must be in a list or tuple" + "can't use starred expression here" + "too many statically nested blocks" # Max. 20 + # This is one of the few places in the cpython code base that I really + # don't understand. It feels a bit hacky if you look at the implementation + # of UNPACK_EX. + "too many expressions in star-unpacking assignment" + + # Just ignore this one, newer versions will not be affected anymore and + # it's a limit of 2^16 - 1. 
+ +# Python/ast.c + # used with_item exprlist expr_stmt + "can't %s %s" % ("assign to" or "delete", + "lambda" + "function call" # foo() + "generator expression" + "list comprehension" + "set comprehension" + "dict comprehension" + "keyword" + "Ellipsis" + "comparison" + Dict: Set: Num: Str: Bytes: JoinedStr: FormattedValue: + "literal" + BoolOp: BinOp: UnaryOp: + "operator" + Yield: YieldFrom: + "yield expression" + Await: + "await expression" + IfExp: + "conditional expression" + "assignment to keyword" # (keywords + __debug__) # None = 2 + "named arguments must follow bare *" # def foo(*): pass + "non-default argument follows default argument" # def f(x=3, y): pass + "iterable unpacking cannot be used in comprehension" # [*[] for a in [1]] + "dict unpacking cannot be used in dict comprehension" # {**{} for a in [1]} + "Generator expression must be parenthesized if not sole argument" # foo(x for x in [], b) + "positional argument follows keyword argument unpacking" # f(**x, y) + "positional argument follows keyword argument" # f(x=2, y) + "iterable argument unpacking follows keyword argument unpacking" # foo(**kwargs, *args) + "lambda cannot contain assignment" # f(lambda: 1=1) + "keyword can't be an expression" # f(+x=1) + "keyword argument repeated" # f(x=1, x=2) + "illegal expression for augmented assignment" # x, y += 1 + "only single target (not list) can be annotated" # [x, y]: int + "only single target (not tuple) can be annotated" # x, y: str + "illegal target for annotation" # True: 1` + "trailing comma not allowed without surrounding parentheses" # from foo import a, + "bytes can only contain ASCII literal characters." # b'ä' # prob. 
only python 3 + "cannot mix bytes and nonbytes literals" # 's' b'' + "assignment to yield expression not possible" # x = yield 1 = 3 + + "f-string: empty expression not allowed" # f'{}' + "f-string: single '}' is not allowed" # f'}' + "f-string: expressions nested too deeply" # f'{1:{5:{3}}}' + "f-string expression part cannot include a backslash" # f'{"\"}' or f'{"\\"}' + "f-string expression part cannot include '#'" # f'{#}' + "f-string: unterminated string" # f'{"}' + "f-string: mismatched '(', '{', or '['" + "f-string: invalid conversion character: expected 's', 'r', or 'a'" # f'{1!b}' + "f-string: unexpected end of string" # Doesn't really happen?! + "f-string: expecting '}'" # f'{' + "(unicode error) unknown error + "(value error) unknown error + "(unicode error) MESSAGE + MESSAGES = { + "\\ at end of string" + "truncated \\xXX escape" + "truncated \\uXXXX escape" + "truncated \\UXXXXXXXX escape" + "illegal Unicode character" # '\Uffffffff' + "malformed \\N character escape" # '\N{}' + "unknown Unicode character name" # '\N{foo}' + } + "(value error) MESSAGE # bytes + MESSAGES = { + "Trailing \\ in string" + "invalid \\x escape at position %d" + } + + "invalid escape sequence \\%c" # Only happens when used in `python -W error` + "unexpected node" # Probably irrelevant + "Unexpected node-type in from-import" # Irrelevant, doesn't happen. + "malformed 'try' statement" # Irrelevant, doesn't happen. 
+ +# Python/symtable.c + "duplicate argument '%U' in function definition" + "name '%U' is assigned to before global declaration" + "name '%U' is assigned to before nonlocal declaration" + "name '%U' is used prior to global declaration" + "name '%U' is used prior to nonlocal declaration" + "annotated name '%U' can't be global" + "annotated name '%U' can't be nonlocal" + "import * only allowed at module level" + + "name '%U' is parameter and global", + "name '%U' is nonlocal and global", + "name '%U' is parameter and nonlocal", + + "nonlocal declaration not allowed at module level"); + "no binding for nonlocal '%U' found", + # RecursionError. Not handled. For all human written code, this is probably + # not an issue. eval("()"*x) with x>=2998 for example fails, but that's + # more than 2000 executions on one line. + "maximum recursion depth exceeded during compilation"); + +# Python/future.c + "not a chance" + "future feature %.100s is not defined" + "from __future__ imports must occur at the beginning of the file" # Also in compile.c + +# Parser/tokenizer.c + # All the following issues seem to be irrelevant for parso, because the + # encoding stuff is done before it reaches the tokenizer. It's already + # unicode at that point. + "encoding problem: %s" + "encoding problem: %s with BOM" + "Non-UTF-8 code starting with '\\x%.2x' in file %U on line %i, but no encoding declared; see http://python.org/dev/peps/pep-0263/ for details" + +# Parser/pythonrun.c + E_SYNTAX: "invalid syntax" + E_LINECONT: "unexpected character after line continuation character" + E_IDENTIFIER: "invalid character in identifier" + # Also just use 'invalid syntax'. Happens mostly with stuff like `(`. This + # message doesn't really help the user, because it only appears very + # randomly, e.g. `(or` wouldn't yield this error. + E_EOF: "unexpected EOF while parsing" + # Even in 3.6 this is implemented kind of shaky. Not implemented, I think + # cPython needs to fix this one first. + # e.g. 
`ast.parse('def x():\n\t if 1:\n \t \tpass')` works :/ + E_TABSPACE: "inconsistent use of tabs and spaces in indentation" + # Ignored, just shown as "invalid syntax". The error has mostly to do with + # numbers like 0b2 everywhere or 1.6_ in Python3.6. + E_TOKEN: "invalid token" + E_EOFS: "EOF while scanning triple-quoted string literal" + E_EOLS: "EOL while scanning string literal" + + # IndentationError + E_DEDENT: "unindent does not match any outer indentation level" + E_TOODEEP: "too many levels of indentation" # 100 levels + E_SYNTAX: "expected an indented block" + "unexpected indent" + # I don't think this actually ever happens. + "unexpected unindent" + + + # Irrelevant for parso for now. + E_OVERFLOW: "expression too long" + E_DECODE: "unknown decode error" + E_BADSINGLE: "multiple statements found while compiling a single statement" diff --git a/bundle/jedi-vim/pythonx/parso/parso/python/parser.py b/bundle/jedi-vim/pythonx/parso/parso/python/parser.py new file mode 100644 index 000000000..fa45e8b15 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/parso/python/parser.py @@ -0,0 +1,206 @@ +from parso.python import tree +from parso.python.token import PythonTokenTypes +from parso.parser import BaseParser + + +NAME = PythonTokenTypes.NAME +INDENT = PythonTokenTypes.INDENT +DEDENT = PythonTokenTypes.DEDENT + + +class Parser(BaseParser): + """ + This class is used to parse a Python file, it then divides them into a + class structure of different scopes. + + :param pgen_grammar: The grammar object of pgen2. Loaded by load_grammar. 
+ """ + + node_map = { + 'expr_stmt': tree.ExprStmt, + 'classdef': tree.Class, + 'funcdef': tree.Function, + 'file_input': tree.Module, + 'import_name': tree.ImportName, + 'import_from': tree.ImportFrom, + 'break_stmt': tree.KeywordStatement, + 'continue_stmt': tree.KeywordStatement, + 'return_stmt': tree.ReturnStmt, + 'raise_stmt': tree.KeywordStatement, + 'yield_expr': tree.YieldExpr, + 'del_stmt': tree.KeywordStatement, + 'pass_stmt': tree.KeywordStatement, + 'global_stmt': tree.GlobalStmt, + 'nonlocal_stmt': tree.KeywordStatement, + 'print_stmt': tree.KeywordStatement, + 'assert_stmt': tree.AssertStmt, + 'if_stmt': tree.IfStmt, + 'with_stmt': tree.WithStmt, + 'for_stmt': tree.ForStmt, + 'while_stmt': tree.WhileStmt, + 'try_stmt': tree.TryStmt, + 'sync_comp_for': tree.SyncCompFor, + # Not sure if this is the best idea, but IMO it's the easiest way to + # avoid extreme amounts of work around the subtle difference of 2/3 + # grammar in list comoprehensions. + 'decorator': tree.Decorator, + 'lambdef': tree.Lambda, + 'lambdef_nocond': tree.Lambda, + 'namedexpr_test': tree.NamedExpr, + } + default_node = tree.PythonNode + + # Names/Keywords are handled separately + _leaf_map = { + PythonTokenTypes.STRING: tree.String, + PythonTokenTypes.NUMBER: tree.Number, + PythonTokenTypes.NEWLINE: tree.Newline, + PythonTokenTypes.ENDMARKER: tree.EndMarker, + PythonTokenTypes.FSTRING_STRING: tree.FStringString, + PythonTokenTypes.FSTRING_START: tree.FStringStart, + PythonTokenTypes.FSTRING_END: tree.FStringEnd, + } + + def __init__(self, pgen_grammar, error_recovery=True, start_nonterminal='file_input'): + super().__init__(pgen_grammar, start_nonterminal, + error_recovery=error_recovery) + + self.syntax_errors = [] + self._omit_dedent_list = [] + self._indent_counter = 0 + + def parse(self, tokens): + if self._error_recovery: + if self._start_nonterminal != 'file_input': + raise NotImplementedError + + tokens = self._recovery_tokenize(tokens) + + return super().parse(tokens) + + 
def convert_node(self, nonterminal, children): + """ + Convert raw node information to a PythonBaseNode instance. + + This is passed to the parser driver which calls it whenever a reduction of a + grammar rule produces a new complete node, so that the tree is build + strictly bottom-up. + """ + try: + node = self.node_map[nonterminal](children) + except KeyError: + if nonterminal == 'suite': + # We don't want the INDENT/DEDENT in our parser tree. Those + # leaves are just cancer. They are virtual leaves and not real + # ones and therefore have pseudo start/end positions and no + # prefixes. Just ignore them. + children = [children[0]] + children[2:-1] + node = self.default_node(nonterminal, children) + return node + + def convert_leaf(self, type, value, prefix, start_pos): + # print('leaf', repr(value), token.tok_name[type]) + if type == NAME: + if value in self._pgen_grammar.reserved_syntax_strings: + return tree.Keyword(value, start_pos, prefix) + else: + return tree.Name(value, start_pos, prefix) + + return self._leaf_map.get(type, tree.Operator)(value, start_pos, prefix) + + def error_recovery(self, token): + tos_nodes = self.stack[-1].nodes + if tos_nodes: + last_leaf = tos_nodes[-1].get_last_leaf() + else: + last_leaf = None + + if self._start_nonterminal == 'file_input' and \ + (token.type == PythonTokenTypes.ENDMARKER + or token.type == DEDENT and not last_leaf.value.endswith('\n') + and not last_leaf.value.endswith('\r')): + # In Python statements need to end with a newline. But since it's + # possible (and valid in Python) that there's no newline at the + # end of a file, we have to recover even if the user doesn't want + # error recovery. + if self.stack[-1].dfa.from_rule == 'simple_stmt': + try: + plan = self.stack[-1].dfa.transitions[PythonTokenTypes.NEWLINE] + except KeyError: + pass + else: + if plan.next_dfa.is_final and not plan.dfa_pushes: + # We are ignoring here that the newline would be + # required for a simple_stmt. 
+ self.stack[-1].dfa = plan.next_dfa + self._add_token(token) + return + + if not self._error_recovery: + return super().error_recovery(token) + + def current_suite(stack): + # For now just discard everything that is not a suite or + # file_input, if we detect an error. + for until_index, stack_node in reversed(list(enumerate(stack))): + # `suite` can sometimes be only simple_stmt, not stmt. + if stack_node.nonterminal == 'file_input': + break + elif stack_node.nonterminal == 'suite': + # In the case where we just have a newline we don't want to + # do error recovery here. In all other cases, we want to do + # error recovery. + if len(stack_node.nodes) != 1: + break + return until_index + + until_index = current_suite(self.stack) + + if self._stack_removal(until_index + 1): + self._add_token(token) + else: + typ, value, start_pos, prefix = token + if typ == INDENT: + # For every deleted INDENT we have to delete a DEDENT as well. + # Otherwise the parser will get into trouble and DEDENT too early. + self._omit_dedent_list.append(self._indent_counter) + + error_leaf = tree.PythonErrorLeaf(typ.name, value, start_pos, prefix) + self.stack[-1].nodes.append(error_leaf) + + tos = self.stack[-1] + if tos.nonterminal == 'suite': + # Need at least one statement in the suite. This happend with the + # error recovery above. + try: + tos.dfa = tos.dfa.arcs['stmt'] + except KeyError: + # We're already in a final state. + pass + + def _stack_removal(self, start_index): + all_nodes = [node for stack_node in self.stack[start_index:] for node in stack_node.nodes] + + if all_nodes: + node = tree.PythonErrorNode(all_nodes) + self.stack[start_index - 1].nodes.append(node) + + self.stack[start_index:] = [] + return bool(all_nodes) + + def _recovery_tokenize(self, tokens): + for token in tokens: + typ = token[0] + if typ == DEDENT: + # We need to count indents, because if we just omit any DEDENT, + # we might omit them in the wrong place. 
+ o = self._omit_dedent_list + if o and o[-1] == self._indent_counter: + o.pop() + self._indent_counter -= 1 + continue + + self._indent_counter -= 1 + elif typ == INDENT: + self._indent_counter += 1 + yield token diff --git a/bundle/jedi-vim/pythonx/parso/parso/python/pep8.py b/bundle/jedi-vim/pythonx/parso/parso/python/pep8.py new file mode 100644 index 000000000..c492dae68 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/parso/python/pep8.py @@ -0,0 +1,767 @@ +import re +from contextlib import contextmanager +from typing import Tuple + +from parso.python.errors import ErrorFinder, ErrorFinderConfig +from parso.normalizer import Rule +from parso.python.tree import Flow, Scope + + +_IMPORT_TYPES = ('import_name', 'import_from') +_SUITE_INTRODUCERS = ('classdef', 'funcdef', 'if_stmt', 'while_stmt', + 'for_stmt', 'try_stmt', 'with_stmt') +_NON_STAR_TYPES = ('term', 'import_from', 'power') +_OPENING_BRACKETS = '(', '[', '{' +_CLOSING_BRACKETS = ')', ']', '}' +_FACTOR = '+', '-', '~' +_ALLOW_SPACE = '*', '+', '-', '**', '/', '//', '@' +_BITWISE_OPERATOR = '<<', '>>', '|', '&', '^' +_NEEDS_SPACE: Tuple[str, ...] 
= ( + '=', '%', '->', + '<', '>', '==', '>=', '<=', '<>', '!=', + '+=', '-=', '*=', '@=', '/=', '%=', '&=', '|=', '^=', '<<=', + '>>=', '**=', '//=') +_NEEDS_SPACE += _BITWISE_OPERATOR +_IMPLICIT_INDENTATION_TYPES = ('dictorsetmaker', 'argument') +_POSSIBLE_SLICE_PARENTS = ('subscript', 'subscriptlist', 'sliceop') + + +class IndentationTypes: + VERTICAL_BRACKET = object() + HANGING_BRACKET = object() + BACKSLASH = object() + SUITE = object() + IMPLICIT = object() + + +class IndentationNode(object): + type = IndentationTypes.SUITE + + def __init__(self, config, indentation, parent=None): + self.bracket_indentation = self.indentation = indentation + self.parent = parent + + def __repr__(self): + return '<%s>' % self.__class__.__name__ + + def get_latest_suite_node(self): + n = self + while n is not None: + if n.type == IndentationTypes.SUITE: + return n + + n = n.parent + + +class BracketNode(IndentationNode): + def __init__(self, config, leaf, parent, in_suite_introducer=False): + self.leaf = leaf + + # Figure out here what the indentation is. For chained brackets + # we can basically use the previous indentation. 
+ previous_leaf = leaf + n = parent + if n.type == IndentationTypes.IMPLICIT: + n = n.parent + while True: + if hasattr(n, 'leaf') and previous_leaf.line != n.leaf.line: + break + + previous_leaf = previous_leaf.get_previous_leaf() + if not isinstance(n, BracketNode) or previous_leaf != n.leaf: + break + n = n.parent + parent_indentation = n.indentation + + next_leaf = leaf.get_next_leaf() + if '\n' in next_leaf.prefix or '\r' in next_leaf.prefix: + # This implies code like: + # foobarbaz( + # a, + # b, + # ) + self.bracket_indentation = parent_indentation \ + + config.closing_bracket_hanging_indentation + self.indentation = parent_indentation + config.indentation + self.type = IndentationTypes.HANGING_BRACKET + else: + # Implies code like: + # foobarbaz( + # a, + # b, + # ) + expected_end_indent = leaf.end_pos[1] + if '\t' in config.indentation: + self.indentation = None + else: + self.indentation = ' ' * expected_end_indent + self.bracket_indentation = self.indentation + self.type = IndentationTypes.VERTICAL_BRACKET + + if in_suite_introducer and parent.type == IndentationTypes.SUITE \ + and self.indentation == parent_indentation + config.indentation: + self.indentation += config.indentation + # The closing bracket should have the same indentation. + self.bracket_indentation = self.indentation + self.parent = parent + + +class ImplicitNode(BracketNode): + """ + Implicit indentation after keyword arguments, default arguments, + annotations and dict values. 
+ """ + def __init__(self, config, leaf, parent): + super().__init__(config, leaf, parent) + self.type = IndentationTypes.IMPLICIT + + next_leaf = leaf.get_next_leaf() + if leaf == ':' and '\n' not in next_leaf.prefix and '\r' not in next_leaf.prefix: + self.indentation += ' ' + + +class BackslashNode(IndentationNode): + type = IndentationTypes.BACKSLASH + + def __init__(self, config, parent_indentation, containing_leaf, spacing, parent=None): + expr_stmt = containing_leaf.search_ancestor('expr_stmt') + if expr_stmt is not None: + equals = expr_stmt.children[-2] + + if '\t' in config.indentation: + # TODO unite with the code of BracketNode + self.indentation = None + else: + # If the backslash follows the equals, use normal indentation + # otherwise it should align with the equals. + if equals.end_pos == spacing.start_pos: + self.indentation = parent_indentation + config.indentation + else: + # +1 because there is a space. + self.indentation = ' ' * (equals.end_pos[1] + 1) + else: + self.indentation = parent_indentation + config.indentation + self.bracket_indentation = self.indentation + self.parent = parent + + +def _is_magic_name(name): + return name.value.startswith('__') and name.value.endswith('__') + + +class PEP8Normalizer(ErrorFinder): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._previous_part = None + self._previous_leaf = None + self._on_newline = True + self._newline_count = 0 + self._wanted_newline_count = None + self._max_new_lines_in_prefix = 0 + self._new_statement = True + self._implicit_indentation_possible = False + # The top of stack of the indentation nodes. 
+ self._indentation_tos = self._last_indentation_tos = \ + IndentationNode(self._config, indentation='') + self._in_suite_introducer = False + + if ' ' in self._config.indentation: + self._indentation_type = 'spaces' + self._wrong_indentation_char = '\t' + else: + self._indentation_type = 'tabs' + self._wrong_indentation_char = ' ' + + @contextmanager + def visit_node(self, node): + with super().visit_node(node): + with self._visit_node(node): + yield + + @contextmanager + def _visit_node(self, node): + typ = node.type + + if typ in 'import_name': + names = node.get_defined_names() + if len(names) > 1: + for name in names[:1]: + self.add_issue(name, 401, 'Multiple imports on one line') + elif typ == 'lambdef': + expr_stmt = node.parent + # Check if it's simply defining a single name, not something like + # foo.bar or x[1], where using a lambda could make more sense. + if expr_stmt.type == 'expr_stmt' and any(n.type == 'name' + for n in expr_stmt.children[:-2:2]): + self.add_issue(node, 731, 'Do not assign a lambda expression, use a def') + elif typ == 'try_stmt': + for child in node.children: + # Here we can simply check if it's an except, because otherwise + # it would be an except_clause. 
+ if child.type == 'keyword' and child.value == 'except': + self.add_issue(child, 722, 'Do not use bare except, specify exception instead') + elif typ == 'comparison': + for child in node.children: + if child.type not in ('atom_expr', 'power'): + continue + if len(child.children) > 2: + continue + trailer = child.children[1] + atom = child.children[0] + if trailer.type == 'trailer' and atom.type == 'name' \ + and atom.value == 'type': + self.add_issue(node, 721, "Do not compare types, use 'isinstance()") + break + elif typ == 'file_input': + endmarker = node.children[-1] + prev = endmarker.get_previous_leaf() + prefix = endmarker.prefix + if (not prefix.endswith('\n') and not prefix.endswith('\r') and ( + prefix or prev is None or prev.value not in {'\n', '\r\n', '\r'})): + self.add_issue(endmarker, 292, "No newline at end of file") + + if typ in _IMPORT_TYPES: + simple_stmt = node.parent + module = simple_stmt.parent + if module.type == 'file_input': + index = module.children.index(simple_stmt) + for child in module.children[:index]: + children = [child] + if child.type == 'simple_stmt': + # Remove the newline. 
+ children = child.children[:-1] + + found_docstring = False + for c in children: + if c.type == 'string' and not found_docstring: + continue + found_docstring = True + + if c.type == 'expr_stmt' and \ + all(_is_magic_name(n) for n in c.get_defined_names()): + continue + + if c.type in _IMPORT_TYPES or isinstance(c, Flow): + continue + + self.add_issue(node, 402, 'Module level import not at top of file') + break + else: + continue + break + + implicit_indentation_possible = typ in _IMPLICIT_INDENTATION_TYPES + in_introducer = typ in _SUITE_INTRODUCERS + if in_introducer: + self._in_suite_introducer = True + elif typ == 'suite': + if self._indentation_tos.type == IndentationTypes.BACKSLASH: + self._indentation_tos = self._indentation_tos.parent + + self._indentation_tos = IndentationNode( + self._config, + self._indentation_tos.indentation + self._config.indentation, + parent=self._indentation_tos + ) + elif implicit_indentation_possible: + self._implicit_indentation_possible = True + yield + if typ == 'suite': + assert self._indentation_tos.type == IndentationTypes.SUITE + self._indentation_tos = self._indentation_tos.parent + # If we dedent, no lines are needed anymore. 
+ self._wanted_newline_count = None + elif implicit_indentation_possible: + self._implicit_indentation_possible = False + if self._indentation_tos.type == IndentationTypes.IMPLICIT: + self._indentation_tos = self._indentation_tos.parent + elif in_introducer: + self._in_suite_introducer = False + if typ in ('classdef', 'funcdef'): + self._wanted_newline_count = self._get_wanted_blank_lines_count() + + def _check_tabs_spaces(self, spacing): + if self._wrong_indentation_char in spacing.value: + self.add_issue(spacing, 101, 'Indentation contains ' + self._indentation_type) + return True + return False + + def _get_wanted_blank_lines_count(self): + suite_node = self._indentation_tos.get_latest_suite_node() + return int(suite_node.parent is None) + 1 + + def _reset_newlines(self, spacing, leaf, is_comment=False): + self._max_new_lines_in_prefix = \ + max(self._max_new_lines_in_prefix, self._newline_count) + + wanted = self._wanted_newline_count + if wanted is not None: + # Need to substract one + blank_lines = self._newline_count - 1 + if wanted > blank_lines and leaf.type != 'endmarker': + # In case of a comment we don't need to add the issue, yet. + if not is_comment: + # TODO end_pos wrong. 
+ code = 302 if wanted == 2 else 301 + message = "expected %s blank line, found %s" \ + % (wanted, blank_lines) + self.add_issue(spacing, code, message) + self._wanted_newline_count = None + else: + self._wanted_newline_count = None + + if not is_comment: + wanted = self._get_wanted_blank_lines_count() + actual = self._max_new_lines_in_prefix - 1 + + val = leaf.value + needs_lines = ( + val == '@' and leaf.parent.type == 'decorator' + or ( + val == 'class' + or val == 'async' and leaf.get_next_leaf() == 'def' + or val == 'def' and self._previous_leaf != 'async' + ) and leaf.parent.parent.type != 'decorated' + ) + if needs_lines and actual < wanted: + func_or_cls = leaf.parent + suite = func_or_cls.parent + if suite.type == 'decorated': + suite = suite.parent + + # The first leaf of a file or a suite should not need blank + # lines. + if suite.children[int(suite.type == 'suite')] != func_or_cls: + code = 302 if wanted == 2 else 301 + message = "expected %s blank line, found %s" \ + % (wanted, actual) + self.add_issue(spacing, code, message) + + self._max_new_lines_in_prefix = 0 + + self._newline_count = 0 + + def visit_leaf(self, leaf): + super().visit_leaf(leaf) + for part in leaf._split_prefix(): + if part.type == 'spacing': + # This part is used for the part call after for. + break + self._visit_part(part, part.create_spacing_part(), leaf) + + self._analyse_non_prefix(leaf) + self._visit_part(leaf, part, leaf) + + # Cleanup + self._last_indentation_tos = self._indentation_tos + + self._new_statement = leaf.type == 'newline' + + # TODO does this work? with brackets and stuff? 
+ if leaf.type == 'newline' and \ + self._indentation_tos.type == IndentationTypes.BACKSLASH: + self._indentation_tos = self._indentation_tos.parent + + if leaf.value == ':' and leaf.parent.type in _SUITE_INTRODUCERS: + self._in_suite_introducer = False + elif leaf.value == 'elif': + self._in_suite_introducer = True + + if not self._new_statement: + self._reset_newlines(part, leaf) + self._max_blank_lines = 0 + + self._previous_leaf = leaf + + return leaf.value + + def _visit_part(self, part, spacing, leaf): + value = part.value + type_ = part.type + if type_ == 'error_leaf': + return + + if value == ',' and part.parent.type == 'dictorsetmaker': + self._indentation_tos = self._indentation_tos.parent + + node = self._indentation_tos + + if type_ == 'comment': + if value.startswith('##'): + # Whole blocks of # should not raise an error. + if value.lstrip('#'): + self.add_issue(part, 266, "Too many leading '#' for block comment.") + elif self._on_newline: + if not re.match(r'#:? ', value) and not value == '#' \ + and not (value.startswith('#!') and part.start_pos == (1, 0)): + self.add_issue(part, 265, "Block comment should start with '# '") + else: + if not re.match(r'#:? [^ ]', value): + self.add_issue(part, 262, "Inline comment should start with '# '") + + self._reset_newlines(spacing, leaf, is_comment=True) + elif type_ == 'newline': + if self._newline_count > self._get_wanted_blank_lines_count(): + self.add_issue(part, 303, "Too many blank lines (%s)" % self._newline_count) + elif leaf in ('def', 'class') \ + and leaf.parent.parent.type == 'decorated': + self.add_issue(part, 304, "Blank lines found after function decorator") + + self._newline_count += 1 + + if type_ == 'backslash': + # TODO is this enough checking? What about ==? 
+ if node.type != IndentationTypes.BACKSLASH: + if node.type != IndentationTypes.SUITE: + self.add_issue(part, 502, 'The backslash is redundant between brackets') + else: + indentation = node.indentation + if self._in_suite_introducer and node.type == IndentationTypes.SUITE: + indentation += self._config.indentation + + self._indentation_tos = BackslashNode( + self._config, + indentation, + part, + spacing, + parent=self._indentation_tos + ) + elif self._on_newline: + indentation = spacing.value + if node.type == IndentationTypes.BACKSLASH \ + and self._previous_part.type == 'newline': + self._indentation_tos = self._indentation_tos.parent + + if not self._check_tabs_spaces(spacing): + should_be_indentation = node.indentation + if type_ == 'comment': + # Comments can be dedented. So we have to care for that. + n = self._last_indentation_tos + while True: + if len(indentation) > len(n.indentation): + break + + should_be_indentation = n.indentation + + self._last_indentation_tos = n + if n == node: + break + n = n.parent + + if self._new_statement: + if type_ == 'newline': + if indentation: + self.add_issue(spacing, 291, 'Trailing whitespace') + elif indentation != should_be_indentation: + s = '%s %s' % (len(self._config.indentation), self._indentation_type) + self.add_issue(part, 111, 'Indentation is not a multiple of ' + s) + else: + if value in '])}': + should_be_indentation = node.bracket_indentation + else: + should_be_indentation = node.indentation + if self._in_suite_introducer and indentation == \ + node.get_latest_suite_node().indentation \ + + self._config.indentation: + self.add_issue(part, 129, "Line with same indent as next logical block") + elif indentation != should_be_indentation: + if not self._check_tabs_spaces(spacing) and part.value not in \ + {'\n', '\r\n', '\r'}: + if value in '])}': + if node.type == IndentationTypes.VERTICAL_BRACKET: + self.add_issue( + part, + 124, + "Closing bracket does not match visual indentation" + ) + else: + 
self.add_issue( + part, + 123, + "Losing bracket does not match " + "indentation of opening bracket's line" + ) + else: + if len(indentation) < len(should_be_indentation): + if node.type == IndentationTypes.VERTICAL_BRACKET: + self.add_issue( + part, + 128, + 'Continuation line under-indented for visual indent' + ) + elif node.type == IndentationTypes.BACKSLASH: + self.add_issue( + part, + 122, + 'Continuation line missing indentation or outdented' + ) + elif node.type == IndentationTypes.IMPLICIT: + self.add_issue(part, 135, 'xxx') + else: + self.add_issue( + part, + 121, + 'Continuation line under-indented for hanging indent' + ) + else: + if node.type == IndentationTypes.VERTICAL_BRACKET: + self.add_issue( + part, + 127, + 'Continuation line over-indented for visual indent' + ) + elif node.type == IndentationTypes.IMPLICIT: + self.add_issue(part, 136, 'xxx') + else: + self.add_issue( + part, + 126, + 'Continuation line over-indented for hanging indent' + ) + else: + self._check_spacing(part, spacing) + + self._check_line_length(part, spacing) + # ------------------------------- + # Finalizing. Updating the state. 
+ # ------------------------------- + if value and value in '()[]{}' and type_ != 'error_leaf' \ + and part.parent.type != 'error_node': + if value in _OPENING_BRACKETS: + self._indentation_tos = BracketNode( + self._config, part, + parent=self._indentation_tos, + in_suite_introducer=self._in_suite_introducer + ) + else: + assert node.type != IndentationTypes.IMPLICIT + self._indentation_tos = self._indentation_tos.parent + elif value in ('=', ':') and self._implicit_indentation_possible \ + and part.parent.type in _IMPLICIT_INDENTATION_TYPES: + indentation = node.indentation + self._indentation_tos = ImplicitNode( + self._config, part, parent=self._indentation_tos + ) + + self._on_newline = type_ in ('newline', 'backslash', 'bom') + + self._previous_part = part + self._previous_spacing = spacing + + def _check_line_length(self, part, spacing): + if part.type == 'backslash': + last_column = part.start_pos[1] + 1 + else: + last_column = part.end_pos[1] + if last_column > self._config.max_characters \ + and spacing.start_pos[1] <= self._config.max_characters: + # Special case for long URLs in multi-line docstrings or comments, + # but still report the error when the 72 first chars are whitespaces. 
+ report = True + if part.type == 'comment': + splitted = part.value[1:].split() + if len(splitted) == 1 \ + and (part.end_pos[1] - len(splitted[0])) < 72: + report = False + if report: + self.add_issue( + part, + 501, + 'Line too long (%s > %s characters)' % + (last_column, self._config.max_characters), + ) + + def _check_spacing(self, part, spacing): + def add_if_spaces(*args): + if spaces: + return self.add_issue(*args) + + def add_not_spaces(*args): + if not spaces: + return self.add_issue(*args) + + spaces = spacing.value + prev = self._previous_part + if prev is not None and prev.type == 'error_leaf' or part.type == 'error_leaf': + return + + type_ = part.type + if '\t' in spaces: + self.add_issue(spacing, 223, 'Used tab to separate tokens') + elif type_ == 'comment': + if len(spaces) < self._config.spaces_before_comment: + self.add_issue(spacing, 261, 'At least two spaces before inline comment') + elif type_ == 'newline': + add_if_spaces(spacing, 291, 'Trailing whitespace') + elif len(spaces) > 1: + self.add_issue(spacing, 221, 'Multiple spaces used') + else: + if prev in _OPENING_BRACKETS: + message = "Whitespace after '%s'" % part.value + add_if_spaces(spacing, 201, message) + elif part in _CLOSING_BRACKETS: + message = "Whitespace before '%s'" % part.value + add_if_spaces(spacing, 202, message) + elif part in (',', ';') or part == ':' \ + and part.parent.type not in _POSSIBLE_SLICE_PARENTS: + message = "Whitespace before '%s'" % part.value + add_if_spaces(spacing, 203, message) + elif prev == ':' and prev.parent.type in _POSSIBLE_SLICE_PARENTS: + pass # TODO + elif prev in (',', ';', ':'): + add_not_spaces(spacing, 231, "missing whitespace after '%s'") + elif part == ':': # Is a subscript + # TODO + pass + elif part in ('*', '**') and part.parent.type not in _NON_STAR_TYPES \ + or prev in ('*', '**') \ + and prev.parent.type not in _NON_STAR_TYPES: + # TODO + pass + elif prev in _FACTOR and prev.parent.type == 'factor': + pass + elif prev == '@' and 
prev.parent.type == 'decorator': + pass # TODO should probably raise an error if there's a space here + elif part in _NEEDS_SPACE or prev in _NEEDS_SPACE: + if part == '=' and part.parent.type in ('argument', 'param') \ + or prev == '=' and prev.parent.type in ('argument', 'param'): + if part == '=': + param = part.parent + else: + param = prev.parent + if param.type == 'param' and param.annotation: + add_not_spaces(spacing, 252, 'Expected spaces around annotation equals') + else: + add_if_spaces( + spacing, + 251, + 'Unexpected spaces around keyword / parameter equals' + ) + elif part in _BITWISE_OPERATOR or prev in _BITWISE_OPERATOR: + add_not_spaces( + spacing, + 227, + 'Missing whitespace around bitwise or shift operator' + ) + elif part == '%' or prev == '%': + add_not_spaces(spacing, 228, 'Missing whitespace around modulo operator') + else: + message_225 = 'Missing whitespace between tokens' + add_not_spaces(spacing, 225, message_225) + elif type_ == 'keyword' or prev.type == 'keyword': + add_not_spaces(spacing, 275, 'Missing whitespace around keyword') + else: + prev_spacing = self._previous_spacing + if prev in _ALLOW_SPACE and spaces != prev_spacing.value \ + and '\n' not in self._previous_leaf.prefix \ + and '\r' not in self._previous_leaf.prefix: + message = "Whitespace before operator doesn't match with whitespace after" + self.add_issue(spacing, 229, message) + + if spaces and part not in _ALLOW_SPACE and prev not in _ALLOW_SPACE: + message_225 = 'Missing whitespace between tokens' + # self.add_issue(spacing, 225, message_225) + # TODO why only brackets? 
+ if part in _OPENING_BRACKETS: + message = "Whitespace before '%s'" % part.value + add_if_spaces(spacing, 211, message) + + def _analyse_non_prefix(self, leaf): + typ = leaf.type + if typ == 'name' and leaf.value in ('l', 'O', 'I'): + if leaf.is_definition(): + message = "Do not define %s named 'l', 'O', or 'I' one line" + if leaf.parent.type == 'class' and leaf.parent.name == leaf: + self.add_issue(leaf, 742, message % 'classes') + elif leaf.parent.type == 'function' and leaf.parent.name == leaf: + self.add_issue(leaf, 743, message % 'function') + else: + self.add_issuadd_issue(741, message % 'variables', leaf) + elif leaf.value == ':': + if isinstance(leaf.parent, (Flow, Scope)) and leaf.parent.type != 'lambdef': + next_leaf = leaf.get_next_leaf() + if next_leaf.type != 'newline': + if leaf.parent.type == 'funcdef': + self.add_issue(next_leaf, 704, 'Multiple statements on one line (def)') + else: + self.add_issue(next_leaf, 701, 'Multiple statements on one line (colon)') + elif leaf.value == ';': + if leaf.get_next_leaf().type in ('newline', 'endmarker'): + self.add_issue(leaf, 703, 'Statement ends with a semicolon') + else: + self.add_issue(leaf, 702, 'Multiple statements on one line (semicolon)') + elif leaf.value in ('==', '!='): + comparison = leaf.parent + index = comparison.children.index(leaf) + left = comparison.children[index - 1] + right = comparison.children[index + 1] + for node in left, right: + if node.type == 'keyword' or node.type == 'name': + if node.value == 'None': + message = "comparison to None should be 'if cond is None:'" + self.add_issue(leaf, 711, message) + break + elif node.value in ('True', 'False'): + message = "comparison to False/True should be " \ + "'if cond is True:' or 'if cond:'" + self.add_issue(leaf, 712, message) + break + elif leaf.value in ('in', 'is'): + comparison = leaf.parent + if comparison.type == 'comparison' and comparison.parent.type == 'not_test': + if leaf.value == 'in': + self.add_issue(leaf, 713, "test for 
membership should be 'not in'") + else: + self.add_issue(leaf, 714, "test for object identity should be 'is not'") + elif typ == 'string': + # Checking multiline strings + for i, line in enumerate(leaf.value.splitlines()[1:]): + indentation = re.match(r'[ \t]*', line).group(0) + start_pos = leaf.line + i, len(indentation) + # TODO check multiline indentation. + start_pos + elif typ == 'endmarker': + if self._newline_count >= 2: + self.add_issue(leaf, 391, 'Blank line at end of file') + + def add_issue(self, node, code, message): + if self._previous_leaf is not None: + if self._previous_leaf.search_ancestor('error_node') is not None: + return + if self._previous_leaf.type == 'error_leaf': + return + if node.search_ancestor('error_node') is not None: + return + if code in (901, 903): + # 901 and 903 are raised by the ErrorFinder. + super().add_issue(node, code, message) + else: + # Skip ErrorFinder here, because it has custom behavior. + super(ErrorFinder, self).add_issue(node, code, message) + + +class PEP8NormalizerConfig(ErrorFinderConfig): + normalizer_class = PEP8Normalizer + """ + Normalizing to PEP8. Not really implemented, yet. + """ + def __init__(self, indentation=' ' * 4, hanging_indentation=None, + max_characters=79, spaces_before_comment=2): + self.indentation = indentation + if hanging_indentation is None: + hanging_indentation = indentation + self.hanging_indentation = hanging_indentation + self.closing_bracket_hanging_indentation = '' + self.break_after_binary = False + self.max_characters = max_characters + self.spaces_before_comment = spaces_before_comment + + +# TODO this is not yet ready. 
+# @PEP8Normalizer.register_rule(type='endmarker') +class BlankLineAtEnd(Rule): + code = 392 + message = 'Blank line at end of file' + + def is_issue(self, leaf): + return self._newline_count >= 2 diff --git a/bundle/jedi-vim/pythonx/parso/parso/python/prefix.py b/bundle/jedi-vim/pythonx/parso/parso/python/prefix.py new file mode 100644 index 000000000..6b8d59e69 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/parso/python/prefix.py @@ -0,0 +1,106 @@ +import re +from codecs import BOM_UTF8 +from typing import Tuple + +from parso.python.tokenize import group + +unicode_bom = BOM_UTF8.decode('utf-8') + + +class PrefixPart: + def __init__(self, leaf, typ, value, spacing='', start_pos=None): + assert start_pos is not None + self.parent = leaf + self.type = typ + self.value = value + self.spacing = spacing + self.start_pos: Tuple[int, int] = start_pos + + @property + def end_pos(self) -> Tuple[int, int]: + if self.value.endswith('\n') or self.value.endswith('\r'): + return self.start_pos[0] + 1, 0 + if self.value == unicode_bom: + # The bom doesn't have a length at the start of a Python file. 
+ return self.start_pos + return self.start_pos[0], self.start_pos[1] + len(self.value) + + def create_spacing_part(self): + column = self.start_pos[1] - len(self.spacing) + return PrefixPart( + self.parent, 'spacing', self.spacing, + start_pos=(self.start_pos[0], column) + ) + + def __repr__(self): + return '%s(%s, %s, %s)' % ( + self.__class__.__name__, + self.type, + repr(self.value), + self.start_pos + ) + + def search_ancestor(self, *node_types): + node = self.parent + while node is not None: + if node.type in node_types: + return node + node = node.parent + return None + + +_comment = r'#[^\n\r\f]*' +_backslash = r'\\\r?\n|\\\r' +_newline = r'\r?\n|\r' +_form_feed = r'\f' +_only_spacing = '$' +_spacing = r'[ \t]*' +_bom = unicode_bom + +_regex = group( + _comment, _backslash, _newline, _form_feed, _only_spacing, _bom, + capture=True +) +_regex = re.compile(group(_spacing, capture=True) + _regex) + + +_types = { + '#': 'comment', + '\\': 'backslash', + '\f': 'formfeed', + '\n': 'newline', + '\r': 'newline', + unicode_bom: 'bom' +} + + +def split_prefix(leaf, start_pos): + line, column = start_pos + start = 0 + value = spacing = '' + bom = False + while start != len(leaf.prefix): + match = _regex.match(leaf.prefix, start) + spacing = match.group(1) + value = match.group(2) + if not value: + break + type_ = _types[value[0]] + yield PrefixPart( + leaf, type_, value, spacing, + start_pos=(line, column + start - int(bom) + len(spacing)) + ) + if type_ == 'bom': + bom = True + + start = match.end(0) + if value.endswith('\n') or value.endswith('\r'): + line += 1 + column = -start + + if value: + spacing = '' + yield PrefixPart( + leaf, 'spacing', spacing, + start_pos=(line, column + start) + ) diff --git a/bundle/jedi-vim/pythonx/parso/parso/python/token.py b/bundle/jedi-vim/pythonx/parso/parso/python/token.py new file mode 100644 index 000000000..9b6f4c7c1 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/parso/python/token.py @@ -0,0 +1,31 @@ +from __future__ 
import absolute_import + +from enum import Enum + + +class TokenType: + name: str + contains_syntax: bool + + def __init__(self, name: str, contains_syntax: bool = False): + self.name = name + self.contains_syntax = contains_syntax + + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, self.name) + + +class PythonTokenTypes(Enum): + STRING = TokenType('STRING') + NUMBER = TokenType('NUMBER') + NAME = TokenType('NAME', contains_syntax=True) + ERRORTOKEN = TokenType('ERRORTOKEN') + NEWLINE = TokenType('NEWLINE') + INDENT = TokenType('INDENT') + DEDENT = TokenType('DEDENT') + ERROR_DEDENT = TokenType('ERROR_DEDENT') + FSTRING_STRING = TokenType('FSTRING_STRING') + FSTRING_START = TokenType('FSTRING_START') + FSTRING_END = TokenType('FSTRING_END') + OP = TokenType('OP', contains_syntax=True) + ENDMARKER = TokenType('ENDMARKER') diff --git a/bundle/jedi-vim/pythonx/parso/parso/python/tokenize.py b/bundle/jedi-vim/pythonx/parso/parso/python/tokenize.py new file mode 100644 index 000000000..e3ffe440b --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/parso/python/tokenize.py @@ -0,0 +1,687 @@ +# -*- coding: utf-8 -*- +""" +This tokenizer has been copied from the ``tokenize.py`` standard library +tokenizer. The reason was simple: The standard library tokenizer fails +if the indentation is not right. To make it possible to do error recovery the + tokenizer needed to be rewritten. + +Basically this is a stripped down version of the standard library module, so +you can read the documentation there. Additionally we included some speed and +memory optimizations here. 
+""" +from __future__ import absolute_import + +import sys +import re +import itertools as _itertools +from codecs import BOM_UTF8 +from typing import NamedTuple, Tuple, Iterator, Iterable, List, Dict, \ + Pattern, Set + +from parso.python.token import PythonTokenTypes +from parso.utils import split_lines, PythonVersionInfo, parse_version_string + + +# Maximum code point of Unicode 6.0: 0x10ffff (1,114,111) +MAX_UNICODE = '\U0010ffff' + +STRING = PythonTokenTypes.STRING +NAME = PythonTokenTypes.NAME +NUMBER = PythonTokenTypes.NUMBER +OP = PythonTokenTypes.OP +NEWLINE = PythonTokenTypes.NEWLINE +INDENT = PythonTokenTypes.INDENT +DEDENT = PythonTokenTypes.DEDENT +ENDMARKER = PythonTokenTypes.ENDMARKER +ERRORTOKEN = PythonTokenTypes.ERRORTOKEN +ERROR_DEDENT = PythonTokenTypes.ERROR_DEDENT +FSTRING_START = PythonTokenTypes.FSTRING_START +FSTRING_STRING = PythonTokenTypes.FSTRING_STRING +FSTRING_END = PythonTokenTypes.FSTRING_END + + +class TokenCollection(NamedTuple): + pseudo_token: Pattern + single_quoted: Set[str] + triple_quoted: Set[str] + endpats: Dict[str, Pattern] + whitespace: Pattern + fstring_pattern_map: Dict[str, str] + always_break_tokens: Tuple[str] + + +BOM_UTF8_STRING = BOM_UTF8.decode('utf-8') + +_token_collection_cache: Dict[PythonVersionInfo, TokenCollection] = {} + + +def group(*choices, capture=False, **kwargs): + assert not kwargs + + start = '(' + if not capture: + start += '?:' + return start + '|'.join(choices) + ')' + + +def maybe(*choices): + return group(*choices) + '?' + + +# Return the empty string, plus all of the valid string prefixes. +def _all_string_prefixes(*, include_fstring=False, only_fstring=False): + def different_case_versions(prefix): + for s in _itertools.product(*[(c, c.upper()) for c in prefix]): + yield ''.join(s) + # The valid string prefixes. Only contain the lower case versions, + # and don't contain any permuations (include 'fr', but not + # 'rf'). The various permutations will be generated. 
+ valid_string_prefixes = ['b', 'r', 'u', 'br'] + + result = {''} + if include_fstring: + f = ['f', 'fr'] + if only_fstring: + valid_string_prefixes = f + result = set() + else: + valid_string_prefixes += f + elif only_fstring: + return set() + + # if we add binary f-strings, add: ['fb', 'fbr'] + for prefix in valid_string_prefixes: + for t in _itertools.permutations(prefix): + # create a list with upper and lower versions of each + # character + result.update(different_case_versions(t)) + return result + + +def _compile(expr): + return re.compile(expr, re.UNICODE) + + +def _get_token_collection(version_info): + try: + return _token_collection_cache[tuple(version_info)] + except KeyError: + _token_collection_cache[tuple(version_info)] = result = \ + _create_token_collection(version_info) + return result + + +unicode_character_name = r'[A-Za-z0-9\-]+(?: [A-Za-z0-9\-]+)*' +fstring_string_single_line = _compile( + r'(?:\{\{|\}\}|\\N\{' + unicode_character_name + + r'\}|\\(?:\r\n?|\n)|\\[^\r\nN]|[^{}\r\n\\])+' +) +fstring_string_multi_line = _compile( + r'(?:\{\{|\}\}|\\N\{' + unicode_character_name + r'\}|\\[^N]|[^{}\\])+' +) +fstring_format_spec_single_line = _compile(r'(?:\\(?:\r\n?|\n)|[^{}\r\n])+') +fstring_format_spec_multi_line = _compile(r'[^{}]+') + + +def _create_token_collection(version_info): + # Note: we use unicode matching for names ("\w") but ascii matching for + # number literals. 
+ Whitespace = r'[ \f\t]*' + whitespace = _compile(Whitespace) + Comment = r'#[^\r\n]*' + Name = '([A-Za-z_0-9\u0080-' + MAX_UNICODE + ']+)' + + Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+' + Binnumber = r'0[bB](?:_?[01])+' + Octnumber = r'0[oO](?:_?[0-7])+' + Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)' + Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber) + Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*' + Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?', + r'\.[0-9](?:_?[0-9])*') + maybe(Exponent) + Expfloat = r'[0-9](?:_?[0-9])*' + Exponent + Floatnumber = group(Pointfloat, Expfloat) + Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]') + Number = group(Imagnumber, Floatnumber, Intnumber) + + # Note that since _all_string_prefixes includes the empty string, + # StringPrefix can be the empty string (making it optional). + possible_prefixes = _all_string_prefixes() + StringPrefix = group(*possible_prefixes) + StringPrefixWithF = group(*_all_string_prefixes(include_fstring=True)) + fstring_prefixes = _all_string_prefixes(include_fstring=True, only_fstring=True) + FStringStart = group(*fstring_prefixes) + + # Tail end of ' string. + Single = r"(?:\\.|[^'\\])*'" + # Tail end of " string. + Double = r'(?:\\.|[^"\\])*"' + # Tail end of ''' string. + Single3 = r"(?:\\.|'(?!'')|[^'\\])*'''" + # Tail end of """ string. + Double3 = r'(?:\\.|"(?!"")|[^"\\])*"""' + Triple = group(StringPrefixWithF + "'''", StringPrefixWithF + '"""') + + # Because of leftmost-then-longest match semantics, be sure to put the + # longest operators first (e.g., if = came before ==, == would get + # recognized as two instances of =). 
+ Operator = group(r"\*\*=?", r">>=?", r"<<=?", + r"//=?", r"->", + r"[+\-*/%&@`|^!=<>]=?", + r"~") + + Bracket = '[][(){}]' + + special_args = [r'\.\.\.', r'\r\n?', r'\n', r'[;.,@]'] + if version_info >= (3, 8): + special_args.insert(0, ":=?") + else: + special_args.insert(0, ":") + Special = group(*special_args) + + Funny = group(Operator, Bracket, Special) + + # First (or only) line of ' or " string. + ContStr = group(StringPrefix + r"'[^\r\n'\\]*(?:\\.[^\r\n'\\]*)*" + + group("'", r'\\(?:\r\n?|\n)'), + StringPrefix + r'"[^\r\n"\\]*(?:\\.[^\r\n"\\]*)*' + + group('"', r'\\(?:\r\n?|\n)')) + pseudo_extra_pool = [Comment, Triple] + all_quotes = '"', "'", '"""', "'''" + if fstring_prefixes: + pseudo_extra_pool.append(FStringStart + group(*all_quotes)) + + PseudoExtras = group(r'\\(?:\r\n?|\n)|\Z', *pseudo_extra_pool) + PseudoToken = group(Whitespace, capture=True) + \ + group(PseudoExtras, Number, Funny, ContStr, Name, capture=True) + + # For a given string prefix plus quotes, endpats maps it to a regex + # to match the remainder of that string. _prefix can be empty, for + # a normal single or triple quoted string (with no prefix). + endpats = {} + for _prefix in possible_prefixes: + endpats[_prefix + "'"] = _compile(Single) + endpats[_prefix + '"'] = _compile(Double) + endpats[_prefix + "'''"] = _compile(Single3) + endpats[_prefix + '"""'] = _compile(Double3) + + # A set of all of the single and triple quoted string prefixes, + # including the opening quotes. 
+ single_quoted = set() + triple_quoted = set() + fstring_pattern_map = {} + for t in possible_prefixes: + for quote in '"', "'": + single_quoted.add(t + quote) + + for quote in '"""', "'''": + triple_quoted.add(t + quote) + + for t in fstring_prefixes: + for quote in all_quotes: + fstring_pattern_map[t + quote] = quote + + ALWAYS_BREAK_TOKENS = (';', 'import', 'class', 'def', 'try', 'except', + 'finally', 'while', 'with', 'return', 'continue', + 'break', 'del', 'pass', 'global', 'assert', 'nonlocal') + pseudo_token_compiled = _compile(PseudoToken) + return TokenCollection( + pseudo_token_compiled, single_quoted, triple_quoted, endpats, + whitespace, fstring_pattern_map, set(ALWAYS_BREAK_TOKENS) + ) + + +class Token(NamedTuple): + type: PythonTokenTypes + string: str + start_pos: Tuple[int, int] + prefix: str + + @property + def end_pos(self) -> Tuple[int, int]: + lines = split_lines(self.string) + if len(lines) > 1: + return self.start_pos[0] + len(lines) - 1, 0 + else: + return self.start_pos[0], self.start_pos[1] + len(self.string) + + +class PythonToken(Token): + def __repr__(self): + return ('TokenInfo(type=%s, string=%r, start_pos=%r, prefix=%r)' % + self._replace(type=self.type.name)) + + +class FStringNode: + def __init__(self, quote): + self.quote = quote + self.parentheses_count = 0 + self.previous_lines = '' + self.last_string_start_pos = None + # In the syntax there can be multiple format_spec's nested: + # {x:{y:3}} + self.format_spec_count = 0 + + def open_parentheses(self, character): + self.parentheses_count += 1 + + def close_parentheses(self, character): + self.parentheses_count -= 1 + if self.parentheses_count == 0: + # No parentheses means that the format spec is also finished. 
+ self.format_spec_count = 0 + + def allow_multiline(self): + return len(self.quote) == 3 + + def is_in_expr(self): + return self.parentheses_count > self.format_spec_count + + def is_in_format_spec(self): + return not self.is_in_expr() and self.format_spec_count + + +def _close_fstring_if_necessary(fstring_stack, string, line_nr, column, additional_prefix): + for fstring_stack_index, node in enumerate(fstring_stack): + lstripped_string = string.lstrip() + len_lstrip = len(string) - len(lstripped_string) + if lstripped_string.startswith(node.quote): + token = PythonToken( + FSTRING_END, + node.quote, + (line_nr, column + len_lstrip), + prefix=additional_prefix+string[:len_lstrip], + ) + additional_prefix = '' + assert not node.previous_lines + del fstring_stack[fstring_stack_index:] + return token, '', len(node.quote) + len_lstrip + return None, additional_prefix, 0 + + +def _find_fstring_string(endpats, fstring_stack, line, lnum, pos): + tos = fstring_stack[-1] + allow_multiline = tos.allow_multiline() + if tos.is_in_format_spec(): + if allow_multiline: + regex = fstring_format_spec_multi_line + else: + regex = fstring_format_spec_single_line + else: + if allow_multiline: + regex = fstring_string_multi_line + else: + regex = fstring_string_single_line + + match = regex.match(line, pos) + if match is None: + return tos.previous_lines, pos + + if not tos.previous_lines: + tos.last_string_start_pos = (lnum, pos) + + string = match.group(0) + for fstring_stack_node in fstring_stack: + end_match = endpats[fstring_stack_node.quote].match(string) + if end_match is not None: + string = end_match.group(0)[:-len(fstring_stack_node.quote)] + + new_pos = pos + new_pos += len(string) + # even if allow_multiline is False, we still need to check for trailing + # newlines, because a single-line f-string can contain line continuations + if string.endswith('\n') or string.endswith('\r'): + tos.previous_lines += string + string = '' + else: + string = tos.previous_lines + string + + 
return string, new_pos + + +def tokenize( + code: str, *, version_info: PythonVersionInfo, start_pos: Tuple[int, int] = (1, 0) +) -> Iterator[PythonToken]: + """Generate tokens from a the source code (string).""" + lines = split_lines(code, keepends=True) + return tokenize_lines(lines, version_info=version_info, start_pos=start_pos) + + +def _print_tokens(func): + """ + A small helper function to help debug the tokenize_lines function. + """ + def wrapper(*args, **kwargs): + for token in func(*args, **kwargs): + print(token) # This print is intentional for debugging! + yield token + + return wrapper + + +# @_print_tokens +def tokenize_lines( + lines: Iterable[str], + *, + version_info: PythonVersionInfo, + indents: List[int] = None, + start_pos: Tuple[int, int] = (1, 0), + is_first_token=True, +) -> Iterator[PythonToken]: + """ + A heavily modified Python standard library tokenizer. + + Additionally to the default information, yields also the prefix of each + token. This idea comes from lib2to3. The prefix contains all information + that is irrelevant for the parser like newlines in parentheses or comments. + """ + def dedent_if_necessary(start): + while start < indents[-1]: + if start > indents[-2]: + yield PythonToken(ERROR_DEDENT, '', (lnum, start), '') + indents[-1] = start + break + indents.pop() + yield PythonToken(DEDENT, '', spos, '') + + pseudo_token, single_quoted, triple_quoted, endpats, whitespace, \ + fstring_pattern_map, always_break_tokens, = \ + _get_token_collection(version_info) + paren_level = 0 # count parentheses + if indents is None: + indents = [0] + max_ = 0 + numchars = '0123456789' + contstr = '' + contline: str + contstr_start: Tuple[int, int] + endprog: Pattern + # We start with a newline. This makes indent at the first position + # possible. It's not valid Python, but still better than an INDENT in the + # second line (and not in the first). This makes quite a few things in + # Jedi's fast parser possible. 
+ new_line = True + prefix = '' # Should never be required, but here for safety + additional_prefix = '' + lnum = start_pos[0] - 1 + fstring_stack: List[FStringNode] = [] + for line in lines: # loop over lines in stream + lnum += 1 + pos = 0 + max_ = len(line) + if is_first_token: + if line.startswith(BOM_UTF8_STRING): + additional_prefix = BOM_UTF8_STRING + line = line[1:] + max_ = len(line) + + # Fake that the part before was already parsed. + line = '^' * start_pos[1] + line + pos = start_pos[1] + max_ += start_pos[1] + + is_first_token = False + + if contstr: # continued string + endmatch = endprog.match(line) # noqa: F821 + if endmatch: + pos = endmatch.end(0) + yield PythonToken( + STRING, contstr + line[:pos], + contstr_start, prefix) # noqa: F821 + contstr = '' + contline = '' + else: + contstr = contstr + line + contline = contline + line + continue + + while pos < max_: + if fstring_stack: + tos = fstring_stack[-1] + if not tos.is_in_expr(): + string, pos = _find_fstring_string(endpats, fstring_stack, line, lnum, pos) + if string: + yield PythonToken( + FSTRING_STRING, string, + tos.last_string_start_pos, + # Never has a prefix because it can start anywhere and + # include whitespace. 
+ prefix='' + ) + tos.previous_lines = '' + continue + if pos == max_: + break + + rest = line[pos:] + fstring_end_token, additional_prefix, quote_length = _close_fstring_if_necessary( + fstring_stack, + rest, + lnum, + pos, + additional_prefix, + ) + pos += quote_length + if fstring_end_token is not None: + yield fstring_end_token + continue + + # in an f-string, match until the end of the string + if fstring_stack: + string_line = line + for fstring_stack_node in fstring_stack: + quote = fstring_stack_node.quote + end_match = endpats[quote].match(line, pos) + if end_match is not None: + end_match_string = end_match.group(0) + if len(end_match_string) - len(quote) + pos < len(string_line): + string_line = line[:pos] + end_match_string[:-len(quote)] + pseudomatch = pseudo_token.match(string_line, pos) + else: + pseudomatch = pseudo_token.match(line, pos) + + if pseudomatch: + prefix = additional_prefix + pseudomatch.group(1) + additional_prefix = '' + start, pos = pseudomatch.span(2) + spos = (lnum, start) + token = pseudomatch.group(2) + if token == '': + assert prefix + additional_prefix = prefix + # This means that we have a line with whitespace/comments at + # the end, which just results in an endmarker. 
+ break + initial = token[0] + else: + match = whitespace.match(line, pos) + initial = line[match.end()] + start = match.end() + spos = (lnum, start) + + if new_line and initial not in '\r\n#' and (initial != '\\' or pseudomatch is None): + new_line = False + if paren_level == 0 and not fstring_stack: + indent_start = start + if indent_start > indents[-1]: + yield PythonToken(INDENT, '', spos, '') + indents.append(indent_start) + yield from dedent_if_necessary(indent_start) + + if not pseudomatch: # scan for tokens + match = whitespace.match(line, pos) + if new_line and paren_level == 0 and not fstring_stack: + yield from dedent_if_necessary(match.end()) + pos = match.end() + new_line = False + yield PythonToken( + ERRORTOKEN, line[pos], (lnum, pos), + additional_prefix + match.group(0) + ) + additional_prefix = '' + pos += 1 + continue + + if (initial in numchars # ordinary number + or (initial == '.' and token != '.' and token != '...')): + yield PythonToken(NUMBER, token, spos, prefix) + elif pseudomatch.group(3) is not None: # ordinary name + if token in always_break_tokens and (fstring_stack or paren_level): + fstring_stack[:] = [] + paren_level = 0 + # We only want to dedent if the token is on a new line. 
+ m = re.match(r'[ \f\t]*$', line[:start]) + if m is not None: + yield from dedent_if_necessary(m.end()) + if token.isidentifier(): + yield PythonToken(NAME, token, spos, prefix) + else: + yield from _split_illegal_unicode_name(token, spos, prefix) + elif initial in '\r\n': + if any(not f.allow_multiline() for f in fstring_stack): + fstring_stack.clear() + + if not new_line and paren_level == 0 and not fstring_stack: + yield PythonToken(NEWLINE, token, spos, prefix) + else: + additional_prefix = prefix + token + new_line = True + elif initial == '#': # Comments + assert not token.endswith("\n") and not token.endswith("\r") + if fstring_stack and fstring_stack[-1].is_in_expr(): + # `#` is not allowed in f-string expressions + yield PythonToken(ERRORTOKEN, initial, spos, prefix) + pos = start + 1 + else: + additional_prefix = prefix + token + elif token in triple_quoted: + endprog = endpats[token] + endmatch = endprog.match(line, pos) + if endmatch: # all on one line + pos = endmatch.end(0) + token = line[start:pos] + yield PythonToken(STRING, token, spos, prefix) + else: + contstr_start = spos # multiple lines + contstr = line[start:] + contline = line + break + + # Check up to the first 3 chars of the token to see if + # they're in the single_quoted set. If so, they start + # a string. + # We're using the first 3, because we're looking for + # "rb'" (for example) at the start of the token. If + # we switch to longer prefixes, this needs to be + # adjusted. + # Note that initial == token[:1]. + # Also note that single quote checking must come after + # triple quote checking (above). + elif initial in single_quoted or \ + token[:2] in single_quoted or \ + token[:3] in single_quoted: + if token[-1] in '\r\n': # continued string + # This means that a single quoted string ends with a + # backslash and is continued. 
+ contstr_start = lnum, start + endprog = (endpats.get(initial) or endpats.get(token[1]) + or endpats.get(token[2])) + contstr = line[start:] + contline = line + break + else: # ordinary string + yield PythonToken(STRING, token, spos, prefix) + elif token in fstring_pattern_map: # The start of an fstring. + fstring_stack.append(FStringNode(fstring_pattern_map[token])) + yield PythonToken(FSTRING_START, token, spos, prefix) + elif initial == '\\' and line[start:] in ('\\\n', '\\\r\n', '\\\r'): # continued stmt + additional_prefix += prefix + line[start:] + break + else: + if token in '([{': + if fstring_stack: + fstring_stack[-1].open_parentheses(token) + else: + paren_level += 1 + elif token in ')]}': + if fstring_stack: + fstring_stack[-1].close_parentheses(token) + else: + if paren_level: + paren_level -= 1 + elif token.startswith(':') and fstring_stack \ + and fstring_stack[-1].parentheses_count \ + - fstring_stack[-1].format_spec_count == 1: + # `:` and `:=` both count + fstring_stack[-1].format_spec_count += 1 + token = ':' + pos = start + 1 + + yield PythonToken(OP, token, spos, prefix) + + if contstr: + yield PythonToken(ERRORTOKEN, contstr, contstr_start, prefix) + if contstr.endswith('\n') or contstr.endswith('\r'): + new_line = True + + if fstring_stack: + tos = fstring_stack[-1] + if tos.previous_lines: + yield PythonToken( + FSTRING_STRING, tos.previous_lines, + tos.last_string_start_pos, + # Never has a prefix because it can start anywhere and + # include whitespace. + prefix='' + ) + + end_pos = lnum, max_ + # As the last position we just take the maximally possible position. We + # remove -1 for the last new line. 
+ for indent in indents[1:]: + indents.pop() + yield PythonToken(DEDENT, '', end_pos, '') + yield PythonToken(ENDMARKER, '', end_pos, additional_prefix) + + +def _split_illegal_unicode_name(token, start_pos, prefix): + def create_token(): + return PythonToken(ERRORTOKEN if is_illegal else NAME, found, pos, prefix) + + found = '' + is_illegal = False + pos = start_pos + for i, char in enumerate(token): + if is_illegal: + if char.isidentifier(): + yield create_token() + found = char + is_illegal = False + prefix = '' + pos = start_pos[0], start_pos[1] + i + else: + found += char + else: + new_found = found + char + if new_found.isidentifier(): + found = new_found + else: + if found: + yield create_token() + prefix = '' + pos = start_pos[0], start_pos[1] + i + found = char + is_illegal = True + + if found: + yield create_token() + + +if __name__ == "__main__": + path = sys.argv[1] + with open(path) as f: + code = f.read() + + for token in tokenize(code, version_info=parse_version_string('3.10')): + print(token) diff --git a/bundle/jedi-vim/pythonx/parso/parso/python/tree.py b/bundle/jedi-vim/pythonx/parso/parso/python/tree.py new file mode 100644 index 000000000..ebb408703 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/parso/python/tree.py @@ -0,0 +1,1242 @@ +""" +This is the syntax tree for Python 3 syntaxes. The classes represent +syntax elements like functions and imports. + +All of the nodes can be traced back to the `Python grammar file +`_. If you want to know how +a tree is structured, just analyse that file (for each Python version it's a +bit different). + +There's a lot of logic here that makes it easier for Jedi (and other libraries) +to deal with a Python syntax tree. + +By using :py:meth:`parso.tree.NodeOrLeaf.get_code` on a module, you can get +back the 1-to-1 representation of the input given to the parser. This is +important if you want to refactor a parser tree. 
+ +>>> from parso import parse +>>> parser = parse('import os') +>>> module = parser.get_root_node() +>>> module + + +Any subclasses of :class:`Scope`, including :class:`Module` has an attribute +:attr:`iter_imports `: + +>>> list(module.iter_imports()) +[] + +Changes to the Python Grammar +----------------------------- + +A few things have changed when looking at Python grammar files: + +- :class:`Param` does not exist in Python grammar files. It is essentially a + part of a ``parameters`` node. |parso| splits it up to make it easier to + analyse parameters. However this just makes it easier to deal with the syntax + tree, it doesn't actually change the valid syntax. +- A few nodes like `lambdef` and `lambdef_nocond` have been merged in the + syntax tree to make it easier to do deal with them. + +Parser Tree Classes +------------------- +""" + +import re +try: + from collections.abc import Mapping +except ImportError: + from collections import Mapping +from typing import Tuple + +from parso.tree import Node, BaseNode, Leaf, ErrorNode, ErrorLeaf, search_ancestor # noqa +from parso.python.prefix import split_prefix +from parso.utils import split_lines + +_FLOW_CONTAINERS = set(['if_stmt', 'while_stmt', 'for_stmt', 'try_stmt', + 'with_stmt', 'async_stmt', 'suite']) +_RETURN_STMT_CONTAINERS = set(['suite', 'simple_stmt']) | _FLOW_CONTAINERS + +_FUNC_CONTAINERS = set( + ['suite', 'simple_stmt', 'decorated', 'async_funcdef'] +) | _FLOW_CONTAINERS + +_GET_DEFINITION_TYPES = set([ + 'expr_stmt', 'sync_comp_for', 'with_stmt', 'for_stmt', 'import_name', + 'import_from', 'param', 'del_stmt', 'namedexpr_test', +]) +_IMPORTS = set(['import_name', 'import_from']) + + +class DocstringMixin: + __slots__ = () + + def get_doc_node(self): + """ + Returns the string leaf of a docstring. e.g. ``r'''foo'''``. 
+ """ + if self.type == 'file_input': + node = self.children[0] + elif self.type in ('funcdef', 'classdef'): + node = self.children[self.children.index(':') + 1] + if node.type == 'suite': # Normally a suite + node = node.children[1] # -> NEWLINE stmt + else: # ExprStmt + simple_stmt = self.parent + c = simple_stmt.parent.children + index = c.index(simple_stmt) + if not index: + return None + node = c[index - 1] + + if node.type == 'simple_stmt': + node = node.children[0] + if node.type == 'string': + return node + return None + + +class PythonMixin: + """ + Some Python specific utilities. + """ + __slots__ = () + + def get_name_of_position(self, position): + """ + Given a (line, column) tuple, returns a :py:class:`Name` or ``None`` if + there is no name at that position. + """ + for c in self.children: + if isinstance(c, Leaf): + if c.type == 'name' and c.start_pos <= position <= c.end_pos: + return c + else: + result = c.get_name_of_position(position) + if result is not None: + return result + return None + + +class PythonLeaf(PythonMixin, Leaf): + __slots__ = () + + def _split_prefix(self): + return split_prefix(self, self.get_start_pos_of_prefix()) + + def get_start_pos_of_prefix(self): + """ + Basically calls :py:meth:`parso.tree.NodeOrLeaf.get_start_pos_of_prefix`. + """ + # TODO it is really ugly that we have to override it. Maybe change + # indent error leafs somehow? No idea how, though. + previous_leaf = self.get_previous_leaf() + if previous_leaf is not None and previous_leaf.type == 'error_leaf' \ + and previous_leaf.token_type in ('INDENT', 'DEDENT', 'ERROR_DEDENT'): + previous_leaf = previous_leaf.get_previous_leaf() + + if previous_leaf is None: # It's the first leaf. + lines = split_lines(self.prefix) + # + 1 is needed because split_lines always returns at least ['']. + return self.line - len(lines) + 1, 0 # It's the first leaf. + return previous_leaf.end_pos + + +class _LeafWithoutNewlines(PythonLeaf): + """ + Simply here to optimize performance. 
+ """ + __slots__ = () + + @property + def end_pos(self) -> Tuple[int, int]: + return self.line, self.column + len(self.value) + + +# Python base classes +class PythonBaseNode(PythonMixin, BaseNode): + __slots__ = () + + +class PythonNode(PythonMixin, Node): + __slots__ = () + + +class PythonErrorNode(PythonMixin, ErrorNode): + __slots__ = () + + +class PythonErrorLeaf(ErrorLeaf, PythonLeaf): + __slots__ = () + + +class EndMarker(_LeafWithoutNewlines): + __slots__ = () + type = 'endmarker' + + def __repr__(self): + return "<%s: prefix=%s end_pos=%s>" % ( + type(self).__name__, repr(self.prefix), self.end_pos + ) + + +class Newline(PythonLeaf): + """Contains NEWLINE and ENDMARKER tokens.""" + __slots__ = () + type = 'newline' + + def __repr__(self): + return "<%s: %s>" % (type(self).__name__, repr(self.value)) + + +class Name(_LeafWithoutNewlines): + """ + A string. Sometimes it is important to know if the string belongs to a name + or not. + """ + type = 'name' + __slots__ = () + + def __repr__(self): + return "<%s: %s@%s,%s>" % (type(self).__name__, self.value, + self.line, self.column) + + def is_definition(self, include_setitem=False): + """ + Returns True if the name is being defined. + """ + return self.get_definition(include_setitem=include_setitem) is not None + + def get_definition(self, import_name_always=False, include_setitem=False): + """ + Returns None if there's no definition for a name. + + :param import_name_always: Specifies if an import name is always a + definition. Normally foo in `from foo import bar` is not a + definition. + """ + node = self.parent + type_ = node.type + + if type_ in ('funcdef', 'classdef'): + if self == node.name: + return node + return None + + if type_ == 'except_clause': + if self.get_previous_sibling() == 'as': + return node.parent # The try_stmt. 
+ return None + + while node is not None: + if node.type == 'suite': + return None + if node.type in _GET_DEFINITION_TYPES: + if self in node.get_defined_names(include_setitem): + return node + if import_name_always and node.type in _IMPORTS: + return node + return None + node = node.parent + return None + + +class Literal(PythonLeaf): + __slots__ = () + + +class Number(Literal): + type = 'number' + __slots__ = () + + +class String(Literal): + type = 'string' + __slots__ = () + + @property + def string_prefix(self): + return re.match(r'\w*(?=[\'"])', self.value).group(0) + + def _get_payload(self): + match = re.search( + r'''('{3}|"{3}|'|")(.*)$''', + self.value, + flags=re.DOTALL + ) + return match.group(2)[:-len(match.group(1))] + + +class FStringString(PythonLeaf): + """ + f-strings contain f-string expressions and normal python strings. These are + the string parts of f-strings. + """ + type = 'fstring_string' + __slots__ = () + + +class FStringStart(PythonLeaf): + """ + f-strings contain f-string expressions and normal python strings. These are + the string parts of f-strings. + """ + type = 'fstring_start' + __slots__ = () + + +class FStringEnd(PythonLeaf): + """ + f-strings contain f-string expressions and normal python strings. These are + the string parts of f-strings. + """ + type = 'fstring_end' + __slots__ = () + + +class _StringComparisonMixin: + def __eq__(self, other): + """ + Make comparisons with strings easy. + Improves the readability of the parser. + """ + if isinstance(other, str): + return self.value == other + + return self is other + + def __hash__(self): + return hash(self.value) + + +class Operator(_LeafWithoutNewlines, _StringComparisonMixin): + type = 'operator' + __slots__ = () + + +class Keyword(_LeafWithoutNewlines, _StringComparisonMixin): + type = 'keyword' + __slots__ = () + + +class Scope(PythonBaseNode, DocstringMixin): + """ + Super class for the parser tree, which represents the state of a python + text file. 
+ A Scope is either a function, class or lambda. + """ + __slots__ = () + + def __init__(self, children): + super().__init__(children) + + def iter_funcdefs(self): + """ + Returns a generator of `funcdef` nodes. + """ + return self._search_in_scope('funcdef') + + def iter_classdefs(self): + """ + Returns a generator of `classdef` nodes. + """ + return self._search_in_scope('classdef') + + def iter_imports(self): + """ + Returns a generator of `import_name` and `import_from` nodes. + """ + return self._search_in_scope('import_name', 'import_from') + + def _search_in_scope(self, *names): + def scan(children): + for element in children: + if element.type in names: + yield element + if element.type in _FUNC_CONTAINERS: + yield from scan(element.children) + + return scan(self.children) + + def get_suite(self): + """ + Returns the part that is executed by the function. + """ + return self.children[-1] + + def __repr__(self): + try: + name = self.name.value + except AttributeError: + name = '' + + return "<%s: %s@%s-%s>" % (type(self).__name__, name, + self.start_pos[0], self.end_pos[0]) + + +class Module(Scope): + """ + The top scope, which is always a module. + Depending on the underlying parser this may be a full module or just a part + of a module. + """ + __slots__ = ('_used_names',) + type = 'file_input' + + def __init__(self, children): + super().__init__(children) + self._used_names = None + + def _iter_future_import_names(self): + """ + :return: A list of future import names. + :rtype: list of str + """ + # In Python it's not allowed to use future imports after the first + # actual (non-future) statement. However this is not a linter here, + # just return all future imports. If people want to scan for issues + # they should use the API. 
+ for imp in self.iter_imports(): + if imp.type == 'import_from' and imp.level == 0: + for path in imp.get_paths(): + names = [name.value for name in path] + if len(names) == 2 and names[0] == '__future__': + yield names[1] + + def get_used_names(self): + """ + Returns all the :class:`Name` leafs that exist in this module. This + includes both definitions and references of names. + """ + if self._used_names is None: + # Don't directly use self._used_names to eliminate a lookup. + dct = {} + + def recurse(node): + try: + children = node.children + except AttributeError: + if node.type == 'name': + arr = dct.setdefault(node.value, []) + arr.append(node) + else: + for child in children: + recurse(child) + + recurse(self) + self._used_names = UsedNamesMapping(dct) + return self._used_names + + +class Decorator(PythonBaseNode): + type = 'decorator' + __slots__ = () + + +class ClassOrFunc(Scope): + __slots__ = () + + @property + def name(self): + """ + Returns the `Name` leaf that defines the function or class name. + """ + return self.children[1] + + def get_decorators(self): + """ + :rtype: list of :class:`Decorator` + """ + decorated = self.parent + if decorated.type == 'async_funcdef': + decorated = decorated.parent + + if decorated.type == 'decorated': + if decorated.children[0].type == 'decorators': + return decorated.children[0].children + else: + return decorated.children[:1] + else: + return [] + + +class Class(ClassOrFunc): + """ + Used to store the parsed contents of a python class. + """ + type = 'classdef' + __slots__ = () + + def __init__(self, children): + super().__init__(children) + + def get_super_arglist(self): + """ + Returns the `arglist` node that defines the super classes. It returns + None if there are no arguments. 
+ """ + if self.children[2] != '(': # Has no parentheses + return None + else: + if self.children[3] == ')': # Empty parentheses + return None + else: + return self.children[3] + + +def _create_params(parent, argslist_list): + """ + `argslist_list` is a list that can contain an argslist as a first item, but + most not. It's basically the items between the parameter brackets (which is + at most one item). + This function modifies the parser structure. It generates `Param` objects + from the normal ast. Those param objects do not exist in a normal ast, but + make the evaluation of the ast tree so much easier. + You could also say that this function replaces the argslist node with a + list of Param objects. + """ + try: + first = argslist_list[0] + except IndexError: + return [] + + if first.type in ('name', 'fpdef'): + return [Param([first], parent)] + elif first == '*': + return [first] + else: # argslist is a `typedargslist` or a `varargslist`. + if first.type == 'tfpdef': + children = [first] + else: + children = first.children + new_children = [] + start = 0 + # Start with offset 1, because the end is higher. + for end, child in enumerate(children + [None], 1): + if child is None or child == ',': + param_children = children[start:end] + if param_children: # Could as well be comma and then end. + if param_children[0] == '*' \ + and (len(param_children) == 1 + or param_children[1] == ',') \ + or param_children[0] == '/': + for p in param_children: + p.parent = parent + new_children += param_children + else: + new_children.append(Param(param_children, parent)) + start = end + return new_children + + +class Function(ClassOrFunc): + """ + Used to store the parsed contents of a python function. + + Children:: + + 0. + 1. + 2. parameter list (including open-paren and close-paren s) + 3. or 5. + 4. or 6. Node() representing function body + 3. -> (if annotation is also present) + 4. 
annotation (if present) + """ + type = 'funcdef' + + def __init__(self, children): + super().__init__(children) + parameters = self.children[2] # After `def foo` + parameters_children = parameters.children[1:-1] + # If input parameters list already has Param objects, keep it as is; + # otherwise, convert it to a list of Param objects. + if not any(isinstance(child, Param) for child in parameters_children): + parameters.children[1:-1] = _create_params(parameters, parameters_children) + + def _get_param_nodes(self): + return self.children[2].children + + def get_params(self): + """ + Returns a list of `Param()`. + """ + return [p for p in self._get_param_nodes() if p.type == 'param'] + + @property + def name(self): + return self.children[1] # First token after `def` + + def iter_yield_exprs(self): + """ + Returns a generator of `yield_expr`. + """ + def scan(children): + for element in children: + if element.type in ('classdef', 'funcdef', 'lambdef'): + continue + + try: + nested_children = element.children + except AttributeError: + if element.value == 'yield': + if element.parent.type == 'yield_expr': + yield element.parent + else: + yield element + else: + yield from scan(nested_children) + + return scan(self.children) + + def iter_return_stmts(self): + """ + Returns a generator of `return_stmt`. + """ + def scan(children): + for element in children: + if element.type == 'return_stmt' \ + or element.type == 'keyword' and element.value == 'return': + yield element + if element.type in _RETURN_STMT_CONTAINERS: + yield from scan(element.children) + + return scan(self.children) + + def iter_raise_stmts(self): + """ + Returns a generator of `raise_stmt`. 
Includes raise statements inside try-except blocks + """ + def scan(children): + for element in children: + if element.type == 'raise_stmt' \ + or element.type == 'keyword' and element.value == 'raise': + yield element + if element.type in _RETURN_STMT_CONTAINERS: + yield from scan(element.children) + + return scan(self.children) + + def is_generator(self): + """ + :return bool: Checks if a function is a generator or not. + """ + return next(self.iter_yield_exprs(), None) is not None + + @property + def annotation(self): + """ + Returns the test node after `->` or `None` if there is no annotation. + """ + try: + if self.children[3] == "->": + return self.children[4] + assert self.children[3] == ":" + return None + except IndexError: + return None + + +class Lambda(Function): + """ + Lambdas are basically trimmed functions, so give it the same interface. + + Children:: + + 0. + *. for each argument x + -2. + -1. Node() representing body + """ + type = 'lambdef' + __slots__ = () + + def __init__(self, children): + # We don't want to call the Function constructor, call its parent. + super(Function, self).__init__(children) + # Everything between `lambda` and the `:` operator is a parameter. + parameters_children = self.children[1:-2] + # If input children list already has Param objects, keep it as is; + # otherwise, convert it to a list of Param objects. + if not any(isinstance(child, Param) for child in parameters_children): + self.children[1:-2] = _create_params(self, parameters_children) + + @property + def name(self): + """ + Raises an AttributeError. Lambdas don't have a defined name. + """ + raise AttributeError("lambda is not named.") + + def _get_param_nodes(self): + return self.children[1:-2] + + @property + def annotation(self): + """ + Returns `None`, lambdas don't have annotations. 
+ """ + return None + + def __repr__(self): + return "<%s@%s>" % (self.__class__.__name__, self.start_pos) + + +class Flow(PythonBaseNode): + __slots__ = () + + +class IfStmt(Flow): + type = 'if_stmt' + __slots__ = () + + def get_test_nodes(self): + """ + E.g. returns all the `test` nodes that are named as x, below: + + if x: + pass + elif x: + pass + """ + for i, c in enumerate(self.children): + if c in ('elif', 'if'): + yield self.children[i + 1] + + def get_corresponding_test_node(self, node): + """ + Searches for the branch in which the node is and returns the + corresponding test node (see function above). However if the node is in + the test node itself and not in the suite return None. + """ + start_pos = node.start_pos + for check_node in reversed(list(self.get_test_nodes())): + if check_node.start_pos < start_pos: + if start_pos < check_node.end_pos: + return None + # In this case the node is within the check_node itself, + # not in the suite + else: + return check_node + + def is_node_after_else(self, node): + """ + Checks if a node is defined after `else`. + """ + for c in self.children: + if c == 'else': + if node.start_pos > c.start_pos: + return True + else: + return False + + +class WhileStmt(Flow): + type = 'while_stmt' + __slots__ = () + + +class ForStmt(Flow): + type = 'for_stmt' + __slots__ = () + + def get_testlist(self): + """ + Returns the input node ``y`` from: ``for x in y:``. + """ + return self.children[3] + + def get_defined_names(self, include_setitem=False): + return _defined_names(self.children[1], include_setitem) + + +class TryStmt(Flow): + type = 'try_stmt' + __slots__ = () + + def get_except_clause_tests(self): + """ + Returns the ``test`` nodes found in ``except_clause`` nodes. + Returns ``[None]`` for except clauses without an exception given. 
+ """ + for node in self.children: + if node.type == 'except_clause': + yield node.children[1] + elif node == 'except': + yield None + + +class WithStmt(Flow): + type = 'with_stmt' + __slots__ = () + + def get_defined_names(self, include_setitem=False): + """ + Returns the a list of `Name` that the with statement defines. The + defined names are set after `as`. + """ + names = [] + for with_item in self.children[1:-2:2]: + # Check with items for 'as' names. + if with_item.type == 'with_item': + names += _defined_names(with_item.children[2], include_setitem) + return names + + def get_test_node_from_name(self, name): + node = name.search_ancestor("with_item") + if node is None: + raise ValueError('The name is not actually part of a with statement.') + return node.children[0] + + +class Import(PythonBaseNode): + __slots__ = () + + def get_path_for_name(self, name): + """ + The path is the list of names that leads to the searched name. + + :return list of Name: + """ + try: + # The name may be an alias. If it is, just map it back to the name. + name = self._aliases()[name] + except KeyError: + pass + + for path in self.get_paths(): + if name in path: + return path[:path.index(name) + 1] + raise ValueError('Name should be defined in the import itself') + + def is_nested(self): + return False # By default, sub classes may overwrite this behavior + + def is_star_import(self): + return self.children[-1] == '*' + + +class ImportFrom(Import): + type = 'import_from' + __slots__ = () + + def get_defined_names(self, include_setitem=False): + """ + Returns the a list of `Name` that the import defines. The + defined names are set after `import` or in case an alias - `as` - is + present that name is returned. 
+ """ + return [alias or name for name, alias in self._as_name_tuples()] + + def _aliases(self): + """Mapping from alias to its corresponding name.""" + return dict((alias, name) for name, alias in self._as_name_tuples() + if alias is not None) + + def get_from_names(self): + for n in self.children[1:]: + if n not in ('.', '...'): + break + if n.type == 'dotted_name': # from x.y import + return n.children[::2] + elif n == 'import': # from . import + return [] + else: # from x import + return [n] + + @property + def level(self): + """The level parameter of ``__import__``.""" + level = 0 + for n in self.children[1:]: + if n in ('.', '...'): + level += len(n.value) + else: + break + return level + + def _as_name_tuples(self): + last = self.children[-1] + if last == ')': + last = self.children[-2] + elif last == '*': + return # No names defined directly. + + if last.type == 'import_as_names': + as_names = last.children[::2] + else: + as_names = [last] + for as_name in as_names: + if as_name.type == 'name': + yield as_name, None + else: + yield as_name.children[::2] # yields x, y -> ``x as y`` + + def get_paths(self): + """ + The import paths defined in an import statement. Typically an array + like this: ``[, ]``. + + :return list of list of Name: + """ + dotted = self.get_from_names() + + if self.children[-1] == '*': + return [dotted] + return [dotted + [name] for name, alias in self._as_name_tuples()] + + +class ImportName(Import): + """For ``import_name`` nodes. Covers normal imports without ``from``.""" + type = 'import_name' + __slots__ = () + + def get_defined_names(self, include_setitem=False): + """ + Returns the a list of `Name` that the import defines. The defined names + is always the first name after `import` or in case an alias - `as` - is + present that name is returned. 
+ """ + return [alias or path[0] for path, alias in self._dotted_as_names()] + + @property + def level(self): + """The level parameter of ``__import__``.""" + return 0 # Obviously 0 for imports without from. + + def get_paths(self): + return [path for path, alias in self._dotted_as_names()] + + def _dotted_as_names(self): + """Generator of (list(path), alias) where alias may be None.""" + dotted_as_names = self.children[1] + if dotted_as_names.type == 'dotted_as_names': + as_names = dotted_as_names.children[::2] + else: + as_names = [dotted_as_names] + + for as_name in as_names: + if as_name.type == 'dotted_as_name': + alias = as_name.children[2] + as_name = as_name.children[0] + else: + alias = None + if as_name.type == 'name': + yield [as_name], alias + else: + # dotted_names + yield as_name.children[::2], alias + + def is_nested(self): + """ + This checks for the special case of nested imports, without aliases and + from statement:: + + import foo.bar + """ + return bool([1 for path, alias in self._dotted_as_names() + if alias is None and len(path) > 1]) + + def _aliases(self): + """ + :return list of Name: Returns all the alias + """ + return dict((alias, path[-1]) for path, alias in self._dotted_as_names() + if alias is not None) + + +class KeywordStatement(PythonBaseNode): + """ + For the following statements: `assert`, `del`, `global`, `nonlocal`, + `raise`, `return`, `yield`. + + `pass`, `continue` and `break` are not in there, because they are just + simple keywords and the parser reduces it to a keyword. + """ + __slots__ = () + + @property + def type(self): + """ + Keyword statements start with the keyword and end with `_stmt`. You can + crosscheck this with the Python grammar. 
+ """ + return '%s_stmt' % self.keyword + + @property + def keyword(self): + return self.children[0].value + + def get_defined_names(self, include_setitem=False): + keyword = self.keyword + if keyword == 'del': + return _defined_names(self.children[1], include_setitem) + if keyword in ('global', 'nonlocal'): + return self.children[1::2] + return [] + + +class AssertStmt(KeywordStatement): + __slots__ = () + + @property + def assertion(self): + return self.children[1] + + +class GlobalStmt(KeywordStatement): + __slots__ = () + + def get_global_names(self): + return self.children[1::2] + + +class ReturnStmt(KeywordStatement): + __slots__ = () + + +class YieldExpr(PythonBaseNode): + type = 'yield_expr' + __slots__ = () + + +def _defined_names(current, include_setitem): + """ + A helper function to find the defined names in statements, for loops and + list comprehensions. + """ + names = [] + if current.type in ('testlist_star_expr', 'testlist_comp', 'exprlist', 'testlist'): + for child in current.children[::2]: + names += _defined_names(child, include_setitem) + elif current.type in ('atom', 'star_expr'): + names += _defined_names(current.children[1], include_setitem) + elif current.type in ('power', 'atom_expr'): + if current.children[-2] != '**': # Just if there's no operation + trailer = current.children[-1] + if trailer.children[0] == '.': + names.append(trailer.children[1]) + elif trailer.children[0] == '[' and include_setitem: + for node in current.children[-2::-1]: + if node.type == 'trailer': + names.append(node.children[1]) + break + if node.type == 'name': + names.append(node) + break + else: + names.append(current) + return names + + +class ExprStmt(PythonBaseNode, DocstringMixin): + type = 'expr_stmt' + __slots__ = () + + def get_defined_names(self, include_setitem=False): + """ + Returns a list of `Name` defined before the `=` sign. 
+ """ + names = [] + if self.children[1].type == 'annassign': + names = _defined_names(self.children[0], include_setitem) + return [ + name + for i in range(0, len(self.children) - 2, 2) + if '=' in self.children[i + 1].value + for name in _defined_names(self.children[i], include_setitem) + ] + names + + def get_rhs(self): + """Returns the right-hand-side of the equals.""" + node = self.children[-1] + if node.type == 'annassign': + if len(node.children) == 4: + node = node.children[3] + else: + node = node.children[1] + return node + + def yield_operators(self): + """ + Returns a generator of `+=`, `=`, etc. or None if there is no operation. + """ + first = self.children[1] + if first.type == 'annassign': + if len(first.children) <= 2: + return # No operator is available, it's just PEP 484. + + first = first.children[2] + yield first + + yield from self.children[3::2] + + +class NamedExpr(PythonBaseNode): + type = 'namedexpr_test' + + def get_defined_names(self, include_setitem=False): + return _defined_names(self.children[0], include_setitem) + + +class Param(PythonBaseNode): + """ + It's a helper class that makes business logic with params much easier. The + Python grammar defines no ``param`` node. It defines it in a different way + that is not really suited to working with parameters. + """ + type = 'param' + + def __init__(self, children, parent=None): + super().__init__(children) + self.parent = parent + + @property + def star_count(self): + """ + Is `0` in case of `foo`, `1` in case of `*foo` or `2` in case of + `**foo`. + """ + first = self.children[0] + if first in ('*', '**'): + return len(first.value) + return 0 + + @property + def default(self): + """ + The default is the test node that appears after the `=`. Is `None` in + case no default is present. 
+ """ + has_comma = self.children[-1] == ',' + try: + if self.children[-2 - int(has_comma)] == '=': + return self.children[-1 - int(has_comma)] + except IndexError: + return None + + @property + def annotation(self): + """ + The default is the test node that appears after `:`. Is `None` in case + no annotation is present. + """ + tfpdef = self._tfpdef() + if tfpdef.type == 'tfpdef': + assert tfpdef.children[1] == ":" + assert len(tfpdef.children) == 3 + annotation = tfpdef.children[2] + return annotation + else: + return None + + def _tfpdef(self): + """ + tfpdef: see e.g. grammar36.txt. + """ + offset = int(self.children[0] in ('*', '**')) + return self.children[offset] + + @property + def name(self): + """ + The `Name` leaf of the param. + """ + if self._tfpdef().type == 'tfpdef': + return self._tfpdef().children[0] + else: + return self._tfpdef() + + def get_defined_names(self, include_setitem=False): + return [self.name] + + @property + def position_index(self): + """ + Property for the positional index of a paramter. + """ + index = self.parent.children.index(self) + try: + keyword_only_index = self.parent.children.index('*') + if index > keyword_only_index: + # Skip the ` *, ` + index -= 2 + except ValueError: + pass + try: + keyword_only_index = self.parent.children.index('/') + if index > keyword_only_index: + # Skip the ` /, ` + index -= 2 + except ValueError: + pass + return index - 1 + + def get_parent_function(self): + """ + Returns the function/lambda of a parameter. + """ + return self.search_ancestor('funcdef', 'lambdef') + + def get_code(self, include_prefix=True, include_comma=True): + """ + Like all the other get_code functions, but includes the param + `include_comma`. + + :param include_comma bool: If enabled includes the comma in the string output. 
+ """ + if include_comma: + return super().get_code(include_prefix) + + children = self.children + if children[-1] == ',': + children = children[:-1] + return self._get_code_for_children( + children, + include_prefix=include_prefix + ) + + def __repr__(self): + default = '' if self.default is None else '=%s' % self.default.get_code() + return '<%s: %s>' % (type(self).__name__, str(self._tfpdef()) + default) + + +class SyncCompFor(PythonBaseNode): + type = 'sync_comp_for' + __slots__ = () + + def get_defined_names(self, include_setitem=False): + """ + Returns the a list of `Name` that the comprehension defines. + """ + # allow async for + return _defined_names(self.children[1], include_setitem) + + +# This is simply here so an older Jedi version can work with this new parso +# version. Can be deleted in the next release. +CompFor = SyncCompFor + + +class UsedNamesMapping(Mapping): + """ + This class exists for the sole purpose of creating an immutable dict. + """ + def __init__(self, dct): + self._dict = dct + + def __getitem__(self, key): + return self._dict[key] + + def __len__(self): + return len(self._dict) + + def __iter__(self): + return iter(self._dict) + + def __hash__(self): + return id(self) + + def __eq__(self, other): + # Comparing these dicts does not make sense. + return self is other diff --git a/bundle/jedi-vim/pythonx/parso/parso/tree.py b/bundle/jedi-vim/pythonx/parso/parso/tree.py new file mode 100644 index 000000000..e5298711e --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/parso/tree.py @@ -0,0 +1,488 @@ +from abc import abstractmethod, abstractproperty +from typing import List, Optional, Tuple, Union + +from parso.utils import split_lines + + +def search_ancestor(node: 'NodeOrLeaf', *node_types: str) -> 'Optional[BaseNode]': + """ + Recursively looks at the parents of a node and returns the first found node + that matches ``node_types``. Returns ``None`` if no matching node is found. 
+ + This function is deprecated, use :meth:`NodeOrLeaf.search_ancestor` instead. + + :param node: The ancestors of this node will be checked. + :param node_types: type names that are searched for. + """ + n = node.parent + while n is not None: + if n.type in node_types: + return n + n = n.parent + return None + + +class NodeOrLeaf: + """ + The base class for nodes and leaves. + """ + __slots__ = ('parent',) + type: str + ''' + The type is a string that typically matches the types of the grammar file. + ''' + parent: 'Optional[BaseNode]' + ''' + The parent :class:`BaseNode` of this node or leaf. + None if this is the root node. + ''' + + def get_root_node(self): + """ + Returns the root node of a parser tree. The returned node doesn't have + a parent node like all the other nodes/leaves. + """ + scope = self + while scope.parent is not None: + scope = scope.parent + return scope + + def get_next_sibling(self): + """ + Returns the node immediately following this node in this parent's + children list. If this node does not have a next sibling, it is None + """ + parent = self.parent + if parent is None: + return None + + # Can't use index(); we need to test by identity + for i, child in enumerate(parent.children): + if child is self: + try: + return self.parent.children[i + 1] + except IndexError: + return None + + def get_previous_sibling(self): + """ + Returns the node immediately preceding this node in this parent's + children list. If this node does not have a previous sibling, it is + None. + """ + parent = self.parent + if parent is None: + return None + + # Can't use index(); we need to test by identity + for i, child in enumerate(parent.children): + if child is self: + if i == 0: + return None + return self.parent.children[i - 1] + + def get_previous_leaf(self): + """ + Returns the previous leaf in the parser tree. + Returns `None` if this is the first element in the parser tree. 
+ """ + if self.parent is None: + return None + + node = self + while True: + c = node.parent.children + i = c.index(node) + if i == 0: + node = node.parent + if node.parent is None: + return None + else: + node = c[i - 1] + break + + while True: + try: + node = node.children[-1] + except AttributeError: # A Leaf doesn't have children. + return node + + def get_next_leaf(self): + """ + Returns the next leaf in the parser tree. + Returns None if this is the last element in the parser tree. + """ + if self.parent is None: + return None + + node = self + while True: + c = node.parent.children + i = c.index(node) + if i == len(c) - 1: + node = node.parent + if node.parent is None: + return None + else: + node = c[i + 1] + break + + while True: + try: + node = node.children[0] + except AttributeError: # A Leaf doesn't have children. + return node + + @abstractproperty + def start_pos(self) -> Tuple[int, int]: + """ + Returns the starting position of the prefix as a tuple, e.g. `(3, 4)`. + + :return tuple of int: (line, column) + """ + + @abstractproperty + def end_pos(self) -> Tuple[int, int]: + """ + Returns the end position of the prefix as a tuple, e.g. `(3, 4)`. + + :return tuple of int: (line, column) + """ + + @abstractmethod + def get_start_pos_of_prefix(self): + """ + Returns the start_pos of the prefix. This means basically it returns + the end_pos of the last prefix. The `get_start_pos_of_prefix()` of the + prefix `+` in `2 + 1` would be `(1, 1)`, while the start_pos is + `(1, 2)`. + + :return tuple of int: (line, column) + """ + + @abstractmethod + def get_first_leaf(self): + """ + Returns the first leaf of a node or itself if this is a leaf. + """ + + @abstractmethod + def get_last_leaf(self): + """ + Returns the last leaf of a node or itself if this is a leaf. + """ + + @abstractmethod + def get_code(self, include_prefix=True): + """ + Returns the code that was the input for the parser for this node. 
+ + :param include_prefix: Removes the prefix (whitespace and comments) of + e.g. a statement. + """ + + def search_ancestor(self, *node_types: str) -> 'Optional[BaseNode]': + """ + Recursively looks at the parents of this node or leaf and returns the + first found node that matches ``node_types``. Returns ``None`` if no + matching node is found. + + :param node_types: type names that are searched for. + """ + node = self.parent + while node is not None: + if node.type in node_types: + return node + node = node.parent + return None + + def dump(self, *, indent: Optional[Union[int, str]] = 4) -> str: + """ + Returns a formatted dump of the parser tree rooted at this node or leaf. This is + mainly useful for debugging purposes. + + The ``indent`` parameter is interpreted in a similar way as :py:func:`ast.dump`. + If ``indent`` is a non-negative integer or string, then the tree will be + pretty-printed with that indent level. An indent level of 0, negative, or ``""`` + will only insert newlines. ``None`` selects the single line representation. + Using a positive integer indent indents that many spaces per level. If + ``indent`` is a string (such as ``"\\t"``), that string is used to indent each + level. + + :param indent: Indentation style as described above. The default indentation is + 4 spaces, which yields a pretty-printed dump. 
+ + >>> import parso + >>> print(parso.parse("lambda x, y: x + y").dump()) + Module([ + Lambda([ + Keyword('lambda', (1, 0)), + Param([ + Name('x', (1, 7), prefix=' '), + Operator(',', (1, 8)), + ]), + Param([ + Name('y', (1, 10), prefix=' '), + ]), + Operator(':', (1, 11)), + PythonNode('arith_expr', [ + Name('x', (1, 13), prefix=' '), + Operator('+', (1, 15), prefix=' '), + Name('y', (1, 17), prefix=' '), + ]), + ]), + EndMarker('', (1, 18)), + ]) + """ + if indent is None: + newline = False + indent_string = '' + elif isinstance(indent, int): + newline = True + indent_string = ' ' * indent + elif isinstance(indent, str): + newline = True + indent_string = indent + else: + raise TypeError(f"expect 'indent' to be int, str or None, got {indent!r}") + + def _format_dump(node: NodeOrLeaf, indent: str = '', top_level: bool = True) -> str: + result = '' + node_type = type(node).__name__ + if isinstance(node, Leaf): + result += f'{indent}{node_type}(' + if isinstance(node, ErrorLeaf): + result += f'{node.token_type!r}, ' + elif isinstance(node, TypedLeaf): + result += f'{node.type!r}, ' + result += f'{node.value!r}, {node.start_pos!r}' + if node.prefix: + result += f', prefix={node.prefix!r}' + result += ')' + elif isinstance(node, BaseNode): + result += f'{indent}{node_type}(' + if isinstance(node, Node): + result += f'{node.type!r}, ' + result += '[' + if newline: + result += '\n' + for child in node.children: + result += _format_dump(child, indent=indent + indent_string, top_level=False) + result += f'{indent}])' + else: # pragma: no cover + # We shouldn't ever reach here, unless: + # - `NodeOrLeaf` is incorrectly subclassed else where + # - or a node's children list contains invalid nodes or leafs + # Both are unexpected internal errors. 
+ raise TypeError(f'unsupported node encountered: {node!r}') + if not top_level: + if newline: + result += ',\n' + else: + result += ', ' + return result + + return _format_dump(self) + + +class Leaf(NodeOrLeaf): + ''' + Leafs are basically tokens with a better API. Leafs exactly know where they + were defined and what text preceeds them. + ''' + __slots__ = ('value', 'line', 'column', 'prefix') + prefix: str + + def __init__(self, value: str, start_pos: Tuple[int, int], prefix: str = '') -> None: + self.value = value + ''' + :py:func:`str` The value of the current token. + ''' + self.start_pos = start_pos + self.prefix = prefix + ''' + :py:func:`str` Typically a mixture of whitespace and comments. Stuff + that is syntactically irrelevant for the syntax tree. + ''' + self.parent: Optional[BaseNode] = None + ''' + The parent :class:`BaseNode` of this leaf. + ''' + + @property + def start_pos(self) -> Tuple[int, int]: + return self.line, self.column + + @start_pos.setter + def start_pos(self, value: Tuple[int, int]) -> None: + self.line = value[0] + self.column = value[1] + + def get_start_pos_of_prefix(self): + previous_leaf = self.get_previous_leaf() + if previous_leaf is None: + lines = split_lines(self.prefix) + # + 1 is needed because split_lines always returns at least ['']. + return self.line - len(lines) + 1, 0 # It's the first leaf. 
+ return previous_leaf.end_pos + + def get_first_leaf(self): + return self + + def get_last_leaf(self): + return self + + def get_code(self, include_prefix=True): + if include_prefix: + return self.prefix + self.value + else: + return self.value + + @property + def end_pos(self) -> Tuple[int, int]: + lines = split_lines(self.value) + end_pos_line = self.line + len(lines) - 1 + # Check for multiline token + if self.line == end_pos_line: + end_pos_column = self.column + len(lines[-1]) + else: + end_pos_column = len(lines[-1]) + return end_pos_line, end_pos_column + + def __repr__(self): + value = self.value + if not value: + value = self.type + return "<%s: %s>" % (type(self).__name__, value) + + +class TypedLeaf(Leaf): + __slots__ = ('type',) + + def __init__(self, type, value, start_pos, prefix=''): + super().__init__(value, start_pos, prefix) + self.type = type + + +class BaseNode(NodeOrLeaf): + """ + The super class for all nodes. + A node has children, a type and possibly a parent node. + """ + __slots__ = ('children',) + + def __init__(self, children: List[NodeOrLeaf]) -> None: + self.children = children + """ + A list of :class:`NodeOrLeaf` child nodes. + """ + self.parent: Optional[BaseNode] = None + ''' + The parent :class:`BaseNode` of this node. + None if this is the root node. 
+ ''' + for child in children: + child.parent = self + + @property + def start_pos(self) -> Tuple[int, int]: + return self.children[0].start_pos + + def get_start_pos_of_prefix(self): + return self.children[0].get_start_pos_of_prefix() + + @property + def end_pos(self) -> Tuple[int, int]: + return self.children[-1].end_pos + + def _get_code_for_children(self, children, include_prefix): + if include_prefix: + return "".join(c.get_code() for c in children) + else: + first = children[0].get_code(include_prefix=False) + return first + "".join(c.get_code() for c in children[1:]) + + def get_code(self, include_prefix=True): + return self._get_code_for_children(self.children, include_prefix) + + def get_leaf_for_position(self, position, include_prefixes=False): + """ + Get the :py:class:`parso.tree.Leaf` at ``position`` + + :param tuple position: A position tuple, row, column. Rows start from 1 + :param bool include_prefixes: If ``False``, ``None`` will be returned if ``position`` falls + on whitespace or comments before a leaf + :return: :py:class:`parso.tree.Leaf` at ``position``, or ``None`` + """ + def binary_search(lower, upper): + if lower == upper: + element = self.children[lower] + if not include_prefixes and position < element.start_pos: + # We're on a prefix. 
+ return None + # In case we have prefixes, a leaf always matches + try: + return element.get_leaf_for_position(position, include_prefixes) + except AttributeError: + return element + + index = int((lower + upper) / 2) + element = self.children[index] + if position <= element.end_pos: + return binary_search(lower, index) + else: + return binary_search(index + 1, upper) + + if not ((1, 0) <= position <= self.children[-1].end_pos): + raise ValueError('Please provide a position that exists within this node.') + return binary_search(0, len(self.children) - 1) + + def get_first_leaf(self): + return self.children[0].get_first_leaf() + + def get_last_leaf(self): + return self.children[-1].get_last_leaf() + + def __repr__(self): + code = self.get_code().replace('\n', ' ').replace('\r', ' ').strip() + return "<%s: %s@%s,%s>" % \ + (type(self).__name__, code, self.start_pos[0], self.start_pos[1]) + + +class Node(BaseNode): + """Concrete implementation for interior nodes.""" + __slots__ = ('type',) + + def __init__(self, type, children): + super().__init__(children) + self.type = type + + def __repr__(self): + return "%s(%s, %r)" % (self.__class__.__name__, self.type, self.children) + + +class ErrorNode(BaseNode): + """ + A node that contains valid nodes/leaves that we're follow by a token that + was invalid. This basically means that the leaf after this node is where + Python would mark a syntax error. + """ + __slots__ = () + type = 'error_node' + + +class ErrorLeaf(Leaf): + """ + A leaf that is either completely invalid in a language (like `$` in Python) + or is invalid at that position. Like the star in `1 +* 1`. 
+ """ + __slots__ = ('token_type',) + type = 'error_leaf' + + def __init__(self, token_type, value, start_pos, prefix=''): + super().__init__(value, start_pos, prefix) + self.token_type = token_type + + def __repr__(self): + return "<%s: %s:%s, %s>" % \ + (type(self).__name__, self.token_type, repr(self.value), self.start_pos) diff --git a/bundle/jedi-vim/pythonx/parso/parso/utils.py b/bundle/jedi-vim/pythonx/parso/parso/utils.py new file mode 100644 index 000000000..6ad1ea26a --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/parso/utils.py @@ -0,0 +1,194 @@ +import re +import sys +from ast import literal_eval +from functools import total_ordering +from typing import NamedTuple, Sequence, Union + +# The following is a list in Python that are line breaks in str.splitlines, but +# not in Python. In Python only \r (Carriage Return, 0xD) and \n (Line Feed, +# 0xA) are allowed to split lines. +_NON_LINE_BREAKS = ( + '\v', # Vertical Tabulation 0xB + '\f', # Form Feed 0xC + '\x1C', # File Separator + '\x1D', # Group Separator + '\x1E', # Record Separator + '\x85', # Next Line (NEL - Equivalent to CR+LF. + # Used to mark end-of-line on some IBM mainframes.) + '\u2028', # Line Separator + '\u2029', # Paragraph Separator +) + + +class Version(NamedTuple): + major: int + minor: int + micro: int + + +def split_lines(string: str, keepends: bool = False) -> Sequence[str]: + r""" + Intended for Python code. In contrast to Python's :py:meth:`str.splitlines`, + looks at form feeds and other special characters as normal text. Just + splits ``\n`` and ``\r\n``. + Also different: Returns ``[""]`` for an empty string input. + + In Python 2.7 form feeds are used as normal characters when using + str.splitlines. However in Python 3 somewhere there was a decision to split + also on form feeds. + """ + if keepends: + lst = string.splitlines(True) + + # We have to merge lines that were broken by form feed characters. 
+ merge = [] + for i, line in enumerate(lst): + try: + last_chr = line[-1] + except IndexError: + pass + else: + if last_chr in _NON_LINE_BREAKS: + merge.append(i) + + for index in reversed(merge): + try: + lst[index] = lst[index] + lst[index + 1] + del lst[index + 1] + except IndexError: + # index + 1 can be empty and therefore there's no need to + # merge. + pass + + # The stdlib's implementation of the end is inconsistent when calling + # it with/without keepends. One time there's an empty string in the + # end, one time there's none. + if string.endswith('\n') or string.endswith('\r') or string == '': + lst.append('') + return lst + else: + return re.split(r'\n|\r\n|\r', string) + + +def python_bytes_to_unicode( + source: Union[str, bytes], encoding: str = 'utf-8', errors: str = 'strict' +) -> str: + """ + Checks for unicode BOMs and PEP 263 encoding declarations. Then returns a + unicode object like in :py:meth:`bytes.decode`. + + :param encoding: See :py:meth:`bytes.decode` documentation. + :param errors: See :py:meth:`bytes.decode` documentation. ``errors`` can be + ``'strict'``, ``'replace'`` or ``'ignore'``. 
+ """ + def detect_encoding(): + """ + For the implementation of encoding definitions in Python, look at: + - http://www.python.org/dev/peps/pep-0263/ + - http://docs.python.org/2/reference/lexical_analysis.html#encoding-declarations + """ + byte_mark = literal_eval(r"b'\xef\xbb\xbf'") + if source.startswith(byte_mark): + # UTF-8 byte-order mark + return 'utf-8' + + first_two_lines = re.match(br'(?:[^\r\n]*(?:\r\n|\r|\n)){0,2}', source).group(0) + possible_encoding = re.search(br"coding[=:]\s*([-\w.]+)", + first_two_lines) + if possible_encoding: + e = possible_encoding.group(1) + if not isinstance(e, str): + e = str(e, 'ascii', 'replace') + return e + else: + # the default if nothing else has been set -> PEP 263 + return encoding + + if isinstance(source, str): + # only cast str/bytes + return source + + encoding = detect_encoding() + try: + # Cast to unicode + return str(source, encoding, errors) + except LookupError: + if errors == 'replace': + # This is a weird case that can happen if the given encoding is not + # a valid encoding. This usually shouldn't happen with provided + # encodings, but can happen if somebody uses encoding declarations + # like `# coding: foo-8`. + return str(source, 'utf-8', errors) + raise + + +def version_info() -> Version: + """ + Returns a namedtuple of parso's version, similar to Python's + ``sys.version_info``. 
+ """ + from parso import __version__ + tupl = re.findall(r'[a-z]+|\d+', __version__) + return Version(*[x if i == 3 else int(x) for i, x in enumerate(tupl)]) + + +class _PythonVersionInfo(NamedTuple): + major: int + minor: int + + +@total_ordering +class PythonVersionInfo(_PythonVersionInfo): + def __gt__(self, other): + if isinstance(other, tuple): + if len(other) != 2: + raise ValueError("Can only compare to tuples of length 2.") + return (self.major, self.minor) > other + super().__gt__(other) + + return (self.major, self.minor) + + def __eq__(self, other): + if isinstance(other, tuple): + if len(other) != 2: + raise ValueError("Can only compare to tuples of length 2.") + return (self.major, self.minor) == other + super().__eq__(other) + + def __ne__(self, other): + return not self.__eq__(other) + + +def _parse_version(version) -> PythonVersionInfo: + match = re.match(r'(\d+)(?:\.(\d{1,2})(?:\.\d+)?)?((a|b|rc)\d)?$', version) + if match is None: + raise ValueError('The given version is not in the right format. ' + 'Use something like "3.8" or "3".') + + major = int(match.group(1)) + minor = match.group(2) + if minor is None: + # Use the latest Python in case it's not exactly defined, because the + # grammars are typically backwards compatible? + if major == 2: + minor = "7" + elif major == 3: + minor = "6" + else: + raise NotImplementedError("Sorry, no support yet for those fancy new/old versions.") + minor = int(minor) + return PythonVersionInfo(major, minor) + + +def parse_version_string(version: str = None) -> PythonVersionInfo: + """ + Checks for a valid version number (e.g. `3.8` or `3.10.1` or `3`) and + returns a corresponding version info that is always two characters long in + decimal. 
+ """ + if version is None: + version = '%s.%s' % sys.version_info[:2] + if not isinstance(version, str): + raise TypeError('version must be a string like "3.8"') + + return _parse_version(version) diff --git a/bundle/jedi-vim/pythonx/parso/pytest.ini b/bundle/jedi-vim/pythonx/parso/pytest.ini new file mode 100644 index 000000000..cd6a4af65 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/pytest.ini @@ -0,0 +1,12 @@ +[pytest] +addopts = --doctest-modules + +testpaths = parso test + +# Ignore broken files inblackbox test directories +norecursedirs = .* docs scripts normalizer_issue_files build + +# Activate `clean_jedi_cache` fixture for all tests. This should be +# fine as long as we are using `clean_jedi_cache` as a session scoped +# fixture. +usefixtures = clean_parso_cache diff --git a/bundle/jedi-vim/pythonx/parso/scripts/diff_parser_profile.py b/bundle/jedi-vim/pythonx/parso/scripts/diff_parser_profile.py new file mode 100644 index 000000000..93a12029a --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/scripts/diff_parser_profile.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python +""" +Profile a piece of Python code with ``cProfile`` that uses the diff parser. + +Usage: + profile.py [-d] [-s ] + profile.py -h | --help + +Options: + -h --help Show this screen. + -d --debug Enable Jedi internal debugging. + -s Sort the profile results, e.g. cumtime, name [default: time]. 
+""" + +import cProfile + +from docopt import docopt +from jedi.parser.python import load_grammar +from jedi.parser.diff import DiffParser +from jedi.parser.python import ParserWithRecovery +from jedi.common import splitlines +import jedi + + +def run(parser, lines): + diff_parser = DiffParser(parser) + diff_parser.update(lines) + # Make sure used_names is loaded + parser.module.used_names + + +def main(args): + if args['--debug']: + jedi.set_debug_function(notices=True) + + with open(args['']) as f: + code = f.read() + grammar = load_grammar() + parser = ParserWithRecovery(grammar, code) + # Make sure used_names is loaded + parser.module.used_names + + code = code + '\na\n' # Add something so the diff parser needs to run. + lines = splitlines(code, keepends=True) + cProfile.runctx('run(parser, lines)', globals(), locals(), sort=args['-s']) + + +if __name__ == '__main__': + args = docopt(__doc__) + main(args) diff --git a/bundle/jedi-vim/pythonx/parso/setup.cfg b/bundle/jedi-vim/pythonx/parso/setup.cfg new file mode 100644 index 000000000..433824a35 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/setup.cfg @@ -0,0 +1,25 @@ +[bdist_wheel] +universal=1 + +[flake8] +max-line-length = 100 +ignore = + # do not use bare 'except' + E722, + # don't know why this was ever even an option, 1+1 should be possible. 
+ E226, + # line break before binary operator + W503, + + +[mypy] +disallow_subclassing_any = True + +# Avoid creating future gotchas emerging from bad typing +warn_redundant_casts = True +warn_unused_ignores = True +warn_return_any = True +warn_unused_configs = True +warn_unreachable = True + +strict_equality = True diff --git a/bundle/jedi-vim/pythonx/parso/setup.py b/bundle/jedi-vim/pythonx/parso/setup.py new file mode 100644 index 000000000..0ead19cb3 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/setup.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python + +from __future__ import with_statement + +from setuptools import setup, find_packages + +import parso + + +__AUTHOR__ = 'David Halter' +__AUTHOR_EMAIL__ = 'davidhalter88@gmail.com' + +readme = open('README.rst').read() + '\n\n' + open('CHANGELOG.rst').read() + +setup( + name='parso', + version=parso.__version__, + description='A Python Parser', + author=__AUTHOR__, + author_email=__AUTHOR_EMAIL__, + include_package_data=True, + maintainer=__AUTHOR__, + maintainer_email=__AUTHOR_EMAIL__, + url='https://github.com/davidhalter/parso', + license='MIT', + keywords='python parser parsing', + long_description=readme, + packages=find_packages(exclude=['test']), + package_data={'parso': ['python/grammar*.txt', 'py.typed', '*.pyi', '**/*.pyi']}, + platforms=['any'], + python_requires='>=3.6', + classifiers=[ + 'Development Status :: 4 - Beta', + 'Environment :: Plugins', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Topic :: Software Development :: Libraries :: Python Modules', + 'Topic :: Text Editors :: Integrated Development Environments (IDE)', + 'Topic :: Utilities', + 'Typing :: Typed', + ], + extras_require={ + 'testing': [ + 
'pytest<6.0.0', + 'docopt', + ], + 'qa': [ + 'flake8==3.8.3', + 'mypy==0.782', + ], + }, +) diff --git a/bundle/jedi-vim/pythonx/parso/test/__init__.py b/bundle/jedi-vim/pythonx/parso/test/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/jedi-vim/pythonx/parso/test/failing_examples.py b/bundle/jedi-vim/pythonx/parso/test/failing_examples.py new file mode 100644 index 000000000..09714d390 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/failing_examples.py @@ -0,0 +1,415 @@ +# -*- coding: utf-8 -*- +import sys +from textwrap import dedent + + +def indent(code): + lines = code.splitlines(True) + return ''.join([' ' * 2 + line for line in lines]) + + +def build_nested(code, depth, base='def f():\n'): + if depth == 0: + return code + + new_code = base + indent(code) + return build_nested(new_code, depth - 1, base=base) + + +FAILING_EXAMPLES = [ + '1 +', + '?', + 'continue', + 'break', + 'return', + 'yield', + + # SyntaxError from Python/ast.c + 'f(x for x in bar, 1)', + 'from foo import a,', + 'from __future__ import whatever', + 'from __future__ import braces', + 'from .__future__ import whatever', + 'def f(x=3, y): pass', + 'lambda x=3, y: x', + '__debug__ = 1', + 'with x() as __debug__: pass', + + '[]: int', + '[a, b]: int', + '(): int', + '(()): int', + '((())): int', + '{}: int', + 'True: int', + '(a, b): int', + '*star,: int', + 'a, b: int = 3', + 'foo(+a=3)', + 'f(lambda: 1=1)', + 'f(x=1, x=2)', + 'f(**x, y)', + 'f(x=2, y)', + 'f(**x, *y)', + 'f(**x, y=3, z)', + # augassign + 'a, b += 3', + '(a, b) += 3', + '[a, b] += 3', + '[a, 1] += 3', + 'f() += 1', + 'lambda x:None+=1', + '{} += 1', + '{a:b} += 1', + '{1} += 1', + '{*x} += 1', + '(x,) += 1', + '(x, y if a else q) += 1', + '[] += 1', + '[1,2] += 1', + '[] += 1', + 'None += 1', + '... 
+= 1', + 'a > 1 += 1', + '"test" += 1', + '1 += 1', + '1.0 += 1', + '(yield) += 1', + '(yield from x) += 1', + '(x if x else y) += 1', + 'a() += 1', + 'a + b += 1', + '+a += 1', + 'a and b += 1', + '*a += 1', + 'a, b += 1', + 'f"xxx" += 1', + # All assignment tests + 'lambda a: 1 = 1', + '[x for x in y] = 1', + '{x for x in y} = 1', + '{x:x for x in y} = 1', + '(x for x in y) = 1', + 'None = 1', + '... = 1', + 'a == b = 1', + '{a, b} = 1', + '{a: b} = 1', + '1 = 1', + '"" = 1', + 'b"" = 1', + 'b"" = 1', + '"" "" = 1', + '1 | 1 = 3', + '1**1 = 3', + '~ 1 = 3', + 'not 1 = 3', + '1 and 1 = 3', + 'def foo(): (yield 1) = 3', + 'def foo(): x = yield 1 = 3', + 'async def foo(): await x = 3', + '(a if a else a) = a', + 'a, 1 = x', + 'foo() = 1', + # Cases without the equals but other assignments. + 'with x as foo(): pass', + 'del bar, 1', + 'for x, 1 in []: pass', + 'for (not 1) in []: pass', + '[x for 1 in y]', + '[x for a, 3 in y]', + '(x for 1 in y)', + '{x for 1 in y}', + '{x:x for 1 in y}', + # Unicode/Bytes issues. 
+ r'u"\x"', + r'u"\"', + r'u"\u"', + r'u"""\U"""', + r'u"\Uffffffff"', + r"u'''\N{}'''", + r"u'\N{foo}'", + r'b"\x"', + r'b"\"', + 'b"ä"', + + '*a, *b = 3, 3', + 'async def foo(): yield from []', + 'yield from []', + '*a = 3', + 'del *a, b', + 'def x(*): pass', + '(%s *d) = x' % ('a,' * 256), + '{**{} for a in [1]}', + '(True,) = x', + '([False], a) = x', + 'def x(): from math import *', + + # invalid del statements + 'del x + y', + 'del x(y)', + 'async def foo(): del await x', + 'def foo(): del (yield x)', + 'del [x for x in range(10)]', + 'del *x', + 'del *x,', + 'del (*x,)', + 'del [*x]', + 'del x, *y', + 'del *x.y,', + 'del *x[y],', + 'del *x[y::], z', + 'del x, (y, *z)', + 'del (x, *[y, z])', + 'del [x, *(y, [*z])]', + 'del {}', + 'del {x}', + 'del {x, y}', + 'del {x, *y}', + + # invalid starred expressions + '*x', + '(*x)', + '((*x))', + '1 + (*x)', + '*x; 1', + '1; *x', + '1\n*x', + 'x = *y', + 'x: int = *y', + 'def foo(): return *x', + 'def foo(): yield *x', + 'f"{*x}"', + 'for *x in 1: pass', + '[1 for *x in 1]', + + # str/bytes combinations + '"s" b""', + '"s" b"" ""', + 'b"" "" b"" ""', + 'f"s" b""', + 'b"s" f""', + + # Parser/tokenize.c + r'"""', + r'"', + r"'''", + r"'", + r"\blub", + # IndentationError: too many levels of indentation + build_nested('pass', 100), + + # SyntaxErrors from Python/symtable.c + 'def f(x, x): pass', + 'nonlocal a', + + # IndentationError + ' foo', + 'def x():\n 1\n 2', + 'def x():\n 1\n 2', + 'if 1:\nfoo', + 'if 1: blubb\nif 1:\npass\nTrue and False', + + # f-strings + 'f"{}"', + r'f"{\}"', + 'f"{\'\\\'}"', + 'f"{#}"', + "f'{1!b}'", + "f'{1:{5:{3}}}'", + "f'{'", + "f'{'", + "f'}'", + "f'{\"}'", + "f'{\"}'", + # Now nested parsing + "f'{continue}'", + "f'{1;1}'", + "f'{a;}'", + "f'{b\"\" \"\"}'", + # f-string expression part cannot include a backslash + r'''f"{'\n'}"''', + + 'async def foo():\n yield x\n return 1', + 'async def foo():\n yield x\n return 1', + + '[*[] for a in [1]]', + 'async def bla():\n def x(): await 
bla()', + 'del None', + 'del True', + 'del False', + 'del ...', + + # Errors of global / nonlocal + dedent(''' + def glob(): + x = 3 + x.z + global x'''), + dedent(''' + def glob(): + x = 3 + global x'''), + dedent(''' + def glob(): + x + global x'''), + dedent(''' + def glob(): + x = 3 + x.z + nonlocal x'''), + dedent(''' + def glob(): + x = 3 + nonlocal x'''), + dedent(''' + def glob(): + x + nonlocal x'''), + # Annotation issues + dedent(''' + def glob(): + x[0]: foo + global x'''), + dedent(''' + def glob(): + x.a: foo + global x'''), + dedent(''' + def glob(): + x: foo + global x'''), + dedent(''' + def glob(): + x: foo = 5 + global x'''), + dedent(''' + def glob(): + x: foo = 5 + x + global x'''), + dedent(''' + def glob(): + global x + x: foo = 3 + '''), + # global/nonlocal + param + dedent(''' + def glob(x): + global x + '''), + dedent(''' + def glob(x): + nonlocal x + '''), + dedent(''' + def x(): + a =3 + def z(): + nonlocal a + a = 3 + nonlocal a + '''), + dedent(''' + def x(): + a = 4 + def y(): + global a + nonlocal a + '''), + # Missing binding of nonlocal + dedent(''' + def x(): + nonlocal a + '''), + dedent(''' + def x(): + def y(): + nonlocal a + '''), + dedent(''' + def x(): + a = 4 + def y(): + global a + print(a) + def z(): + nonlocal a + '''), + # Name is assigned before nonlocal declaration + dedent(''' + def x(a): + def y(): + a = 10 + nonlocal a + '''), +] + +if sys.version_info[:2] >= (3, 7): + # This is somehow ok in previous versions. 
+ FAILING_EXAMPLES += [ + 'class X(base for base in bases): pass', + ] + +if sys.version_info[:2] < (3, 8): + FAILING_EXAMPLES += [ + # Python/compile.c + dedent('''\ + for a in [1]: + try: + pass + finally: + continue + '''), # 'continue' not supported inside 'finally' clause" + ] + +if sys.version_info[:2] >= (3, 8): + # assignment expressions from issue#89 + FAILING_EXAMPLES += [ + # Case 2 + '(lambda: x := 1)', + '((lambda: x) := 1)', + # Case 3 + '(a[i] := x)', + '((a[i]) := x)', + '(a(i) := x)', + # Case 4 + '(a.b := c)', + '[(i.i:= 0) for ((i), j) in range(5)]', + # Case 5 + '[i:= 0 for i, j in range(5)]', + '[(i:= 0) for ((i), j) in range(5)]', + '[(i:= 0) for ((i), j), in range(5)]', + '[(i:= 0) for ((i), j.i), in range(5)]', + '[[(i:= i) for j in range(5)] for i in range(5)]', + '[i for i, j in range(5) if True or (i:= 1)]', + '[False and (i:= 0) for i, j in range(5)]', + # Case 6 + '[i+1 for i in (i:= range(5))]', + '[i+1 for i in (j:= range(5))]', + '[i+1 for i in (lambda: (j:= range(5)))()]', + # Case 7 + 'class Example:\n [(j := i) for i in range(5)]', + # Not in that issue + '(await a := x)', + '((await a) := x)', + # new discoveries + '((a, b) := (1, 2))', + '([a, b] := [1, 2])', + '({a, b} := {1, 2})', + '({a: b} := {1: 2})', + '(a + b := 1)', + '(True := 1)', + '(False := 1)', + '(None := 1)', + '(__debug__ := 1)', + # Unparenthesized walrus not allowed in dict literals, dict comprehensions and slices + '{a:="a": b:=1}', + '{y:=1: 2 for x in range(5)}', + 'a[b:=0:1:2]', + ] + # f-string debugging syntax with invalid conversion character + FAILING_EXAMPLES += [ + "f'{1=!b}'", + ] diff --git a/bundle/jedi-vim/pythonx/parso/test/fuzz_diff_parser.py b/bundle/jedi-vim/pythonx/parso/test/fuzz_diff_parser.py new file mode 100644 index 000000000..39b93f21d --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/fuzz_diff_parser.py @@ -0,0 +1,307 @@ +""" +A script to find bugs in the diff parser. 
+ +This script is extremely useful if changes are made to the diff parser. By +running a few thousand iterations, we can assure that the diff parser is in +good shape. + +Usage: + fuzz_diff_parser.py [--pdb|--ipdb] [-l] [-n=] [-x=] random [] + fuzz_diff_parser.py [--pdb|--ipdb] [-l] redo [-o=] [-p] + fuzz_diff_parser.py -h | --help + +Options: + -h --help Show this screen + -n, --maxtries= Maximum of random tries [default: 1000] + -x, --changes= Amount of changes to be done to a file per try [default: 5] + -l, --logging Prints all the logs + -o, --only-last= Only runs the last n iterations; Defaults to running all + -p, --print-code Print all test diffs + --pdb Launch pdb when error is raised + --ipdb Launch ipdb when error is raised +""" + +from __future__ import print_function +import logging +import sys +import os +import random +import pickle + +import parso +from parso.utils import split_lines +from test.test_diff_parser import _check_error_leaves_nodes + +_latest_grammar = parso.load_grammar(version='3.8') +_python_reserved_strings = tuple( + # Keywords are ususally only interesting in combination with spaces after + # them. We don't put a space before keywords, to avoid indentation errors. + s + (' ' if s.isalpha() else '') + for s in _latest_grammar._pgen_grammar.reserved_syntax_strings.keys() +) +_random_python_fragments = _python_reserved_strings + ( + ' ', '\t', '\n', '\r', '\f', 'f"', 'F"""', "fr'", "RF'''", '"', '"""', "'", + "'''", ';', ' some_random_word ', '\\', '#', +) + + +def find_python_files_in_tree(file_path): + if not os.path.isdir(file_path): + yield file_path + return + for root, dirnames, filenames in os.walk(file_path): + if 'chardet' in root: + # Stuff like chardet/langcyrillicmodel.py is just very slow to + # parse and machine generated, so ignore those. 
+ continue + + for name in filenames: + if name.endswith('.py'): + yield os.path.join(root, name) + + +def _print_copyable_lines(lines): + for line in lines: + line = repr(line)[1:-1] + if line.endswith(r'\n'): + line = line[:-2] + '\n' + print(line, end='') + + +def _get_first_error_start_pos_or_none(module): + error_leaf = _check_error_leaves_nodes(module) + return None if error_leaf is None else error_leaf.start_pos + + +class LineReplacement: + def __init__(self, line_nr, new_line): + self._line_nr = line_nr + self._new_line = new_line + + def apply(self, code_lines): + # print(repr(self._new_line)) + code_lines[self._line_nr] = self._new_line + + +class LineDeletion: + def __init__(self, line_nr): + self.line_nr = line_nr + + def apply(self, code_lines): + del code_lines[self.line_nr] + + +class LineCopy: + def __init__(self, copy_line, insertion_line): + self._copy_line = copy_line + self._insertion_line = insertion_line + + def apply(self, code_lines): + code_lines.insert( + self._insertion_line, + # Use some line from the file. This doesn't feel totally + # random, but for the diff parser it will feel like it. + code_lines[self._copy_line] + ) + + +class FileModification: + @classmethod + def generate(cls, code_lines, change_count, previous_file_modification=None): + if previous_file_modification is not None and random.random() > 0.5: + # We want to keep the previous modifications in some cases to make + # more complex parser issues visible. + code_lines = previous_file_modification.apply(code_lines) + added_modifications = previous_file_modification.modification_list + else: + added_modifications = [] + return cls( + added_modifications + + list(cls._generate_line_modifications(code_lines, change_count)), + # work with changed trees more than with normal ones. 
+ check_original=random.random() > 0.8, + ) + + @staticmethod + def _generate_line_modifications(lines, change_count): + def random_line(include_end=False): + return random.randint(0, len(lines) - (not include_end)) + + lines = list(lines) + for _ in range(change_count): + rand = random.randint(1, 4) + if rand == 1: + if len(lines) == 1: + # We cannot delete every line, that doesn't make sense to + # fuzz and it would be annoying to rewrite everything here. + continue + ld = LineDeletion(random_line()) + elif rand == 2: + # Copy / Insertion + # Make it possible to insert into the first and the last line + ld = LineCopy(random_line(), random_line(include_end=True)) + elif rand in (3, 4): + # Modify a line in some weird random ways. + line_nr = random_line() + line = lines[line_nr] + column = random.randint(0, len(line)) + random_string = '' + for _ in range(random.randint(1, 3)): + if random.random() > 0.8: + # The lower characters cause way more issues. + unicode_range = 0x1f if random.randint(0, 1) else 0x3000 + random_string += chr(random.randint(0, unicode_range)) + else: + # These insertions let us understand how random + # keyword/operator insertions work. Theoretically this + # could also be done with unicode insertions, but the + # fuzzer is just way more effective here. + random_string += random.choice(_random_python_fragments) + if random.random() > 0.5: + # In this case we insert at a very random place that + # probably breaks syntax. + line = line[:column] + random_string + line[column:] + else: + # Here we have better chances to not break syntax, because + # we really replace the line with something that has + # indentation. 
+ line = ' ' * random.randint(0, 12) + random_string + '\n' + ld = LineReplacement(line_nr, line) + ld.apply(lines) + yield ld + + def __init__(self, modification_list, check_original): + self.modification_list = modification_list + self._check_original = check_original + + def apply(self, code_lines): + changed_lines = list(code_lines) + for modification in self.modification_list: + modification.apply(changed_lines) + return changed_lines + + def run(self, grammar, code_lines, print_code): + code = ''.join(code_lines) + modified_lines = self.apply(code_lines) + modified_code = ''.join(modified_lines) + + if print_code: + if self._check_original: + print('Original:') + _print_copyable_lines(code_lines) + + print('\nModified:') + _print_copyable_lines(modified_lines) + print() + + if self._check_original: + m = grammar.parse(code, diff_cache=True) + start1 = _get_first_error_start_pos_or_none(m) + + grammar.parse(modified_code, diff_cache=True) + + if self._check_original: + # Also check if it's possible to "revert" the changes. 
+ m = grammar.parse(code, diff_cache=True) + start2 = _get_first_error_start_pos_or_none(m) + assert start1 == start2, (start1, start2) + + +class FileTests: + def __init__(self, file_path, test_count, change_count): + self._path = file_path + with open(file_path, errors='replace') as f: + code = f.read() + self._code_lines = split_lines(code, keepends=True) + self._test_count = test_count + self._code_lines = self._code_lines + self._change_count = change_count + self._file_modifications = [] + + def _run(self, grammar, file_modifications, debugger, print_code=False): + try: + for i, fm in enumerate(file_modifications, 1): + fm.run(grammar, self._code_lines, print_code=print_code) + print('.', end='') + sys.stdout.flush() + print() + except Exception: + print("Issue in file: %s" % self._path) + if debugger: + einfo = sys.exc_info() + pdb = __import__(debugger) + pdb.post_mortem(einfo[2]) + raise + + def redo(self, grammar, debugger, only_last, print_code): + mods = self._file_modifications + if only_last is not None: + mods = mods[-only_last:] + self._run(grammar, mods, debugger, print_code=print_code) + + def run(self, grammar, debugger): + def iterate(): + fm = None + for _ in range(self._test_count): + fm = FileModification.generate( + self._code_lines, self._change_count, + previous_file_modification=fm + ) + self._file_modifications.append(fm) + yield fm + + self._run(grammar, iterate(), debugger) + + +def main(arguments): + debugger = 'pdb' if arguments['--pdb'] else \ + 'ipdb' if arguments['--ipdb'] else None + redo_file = os.path.join(os.path.dirname(__file__), 'fuzz-redo.pickle') + + if arguments['--logging']: + root = logging.getLogger() + root.setLevel(logging.DEBUG) + + ch = logging.StreamHandler(sys.stdout) + ch.setLevel(logging.DEBUG) + root.addHandler(ch) + + grammar = parso.load_grammar() + parso.python.diff.DEBUG_DIFF_PARSER = True + if arguments['redo']: + with open(redo_file, 'rb') as f: + file_tests_obj = pickle.load(f) + only_last = 
arguments['--only-last'] and int(arguments['--only-last']) + file_tests_obj.redo( + grammar, + debugger, + only_last=only_last, + print_code=arguments['--print-code'] + ) + elif arguments['random']: + # A random file is used to do diff parser checks if no file is given. + # This helps us to find errors in a lot of different files. + file_paths = list(find_python_files_in_tree(arguments[''] or '.')) + max_tries = int(arguments['--maxtries']) + tries = 0 + try: + while tries < max_tries: + path = random.choice(file_paths) + print("Checking %s: %s tries" % (path, tries)) + now_tries = min(1000, max_tries - tries) + file_tests_obj = FileTests(path, now_tries, int(arguments['--changes'])) + file_tests_obj.run(grammar, debugger) + tries += now_tries + except Exception: + with open(redo_file, 'wb') as f: + pickle.dump(file_tests_obj, f) + raise + else: + raise NotImplementedError('Command is not implemented') + + +if __name__ == '__main__': + from docopt import docopt + + arguments = docopt(__doc__) + main(arguments) diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E10.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E10.py new file mode 100644 index 000000000..38d7a1904 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E10.py @@ -0,0 +1,51 @@ +for a in 'abc': + for b in 'xyz': + hello(a) # indented with 8 spaces + #: E903:0 + hello(b) # indented with 1 tab +if True: + #: E101:0 + pass + +#: E122+1 +change_2_log = \ +"""Change 2 by slamb@testclient on 2006/04/13 21:46:23 + + creation +""" + +p4change = { + 2: change_2_log, +} + + +class TestP4Poller(unittest.TestCase): + def setUp(self): + self.setUpGetProcessOutput() + return self.setUpChangeSource() + + def tearDown(self): + pass + + +# +if True: + #: E101:0 E101+1:0 + foo(1, + 2) + + +def test_keys(self): + """areas.json - All regions are accounted for.""" + expected = set([ + #: E101:0 + u'Norrbotten', + #: E101:0 + u'V\xe4sterbotten', + ]) + + +if True: 
+ hello(""" + tab at start of this line +""") diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E101.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E101.py new file mode 100644 index 000000000..cc2471987 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E101.py @@ -0,0 +1,137 @@ +# Used to be the file for W191 + +#: E101+1 +if False: + print # indented with 1 tab + +#: E101+1 +y = x == 2 \ + or x == 3 +#: E101+5 +if ( + x == ( + 3 + ) or + y == 4): + pass +#: E101+3 +if x == 2 \ + or y > 1 \ + or x == 3: + pass +#: E101+3 +if x == 2 \ + or y > 1 \ + or x == 3: + pass + +#: E101+1 +if (foo == bar and baz == frop): + pass +#: E101+1 +if (foo == bar and baz == frop): + pass + +#: E101+2 E101+3 +if start[1] > end_col and not ( + over_indent == 4 and indent_next): + assert (0, "E121 continuation line over-" + "indented for visual indent") + + +#: E101+3 +def long_function_name( + var_one, var_two, var_three, + var_four): + hello(var_one) + + +#: E101+2 +if ((row < 0 or self.moduleCount <= row or + col < 0 or self.moduleCount <= col)): + raise Exception("%s,%s - %s" % (row, col, self.moduleCount)) +#: E101+1 E101+2 E101+3 E101+4 E101+5 E101+6 +if bar: + assert ( + start, 'E121 lines starting with a ' + 'closing bracket should be indented ' + "to match that of the opening " + "bracket's line" + ) + +# you want vertical alignment, so use a parens +#: E101+3 +if ((foo.bar("baz") and + foo.bar("frop") + )): + hello("yes") +#: E101+3 +# also ok, but starting to look like LISP +if ((foo.bar("baz") and + foo.bar("frop"))): + hello("yes") +#: E101+1 +if (a == 2 or b == "abc def ghi" "jkl mno"): + assert True +#: E101+2 +if (a == 2 or b == """abc def ghi +jkl mno"""): + assert True +#: E101+1 E101+2 +if length > options.max_line_length: + assert options.max_line_length, \ + "E501 line too long (%d characters)" % length + + +#: E101+1 E101+2 +if os.path.exists(os.path.join(path, PEP8_BIN)): + cmd = 
([os.path.join(path, PEP8_BIN)] + + self._pep8_options(targetfile)) +# TODO Tabs in docstrings shouldn't be there, use \t. +''' + multiline string with tab in it''' +# Same here. +'''multiline string + with tabs + and spaces +''' +# Okay +'''sometimes, you just need to go nuts in a multiline string + and allow all sorts of crap + like mixed tabs and spaces + +or trailing whitespace +or long long long long long long long long long long long long long long long long long lines +''' # noqa +# Okay +'''this one + will get no warning +even though the noqa comment is not immediately after the string +''' + foo # noqa + +#: E101+2 +if foo is None and bar is "frop" and \ + blah == 'yeah': + blah = 'yeahnah' + + +#: E101+1 E101+2 E101+3 +if True: + foo( + 1, + 2) + + +#: E101+1 E101+2 E101+3 E101+4 E101+5 +def test_keys(self): + """areas.json - All regions are accounted for.""" + expected = set([ + u'Norrbotten', + u'V\xe4sterbotten', + ]) + + +#: E101+1 +x = [ + 'abc' +] diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E11.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E11.py new file mode 100644 index 000000000..9b97f3980 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E11.py @@ -0,0 +1,60 @@ +if x > 2: + #: E111:2 + hello(x) +if True: + #: E111:5 + print + #: E111:6 + # + #: E111:2 + # what + # Comment is fine +# Comment is also fine + +if False: + pass +print +print +#: E903:0 + print +mimetype = 'application/x-directory' +#: E111:5 + # 'httpd/unix-directory' +create_date = False + + +def start(self): + # foo + #: E111:8 + # bar + if True: # Hello + self.master.start() # Comment + # try: + #: E111:12 + # self.master.start() + # except MasterExit: + #: E111:12 + # self.shutdown() + # finally: + #: E111:12 + # sys.exit() + # Dedent to the first level + #: E111:6 + # error +# Dedent to the base level +#: E111:2 + # Also wrongly indented. +# Indent is correct. 
+ + +def start(self): # Correct comment + if True: + #: E111:0 +# try: + #: E111:0 +# self.master.start() + #: E111:0 +# except MasterExit: + #: E111:0 +# self.shutdown() + self.master.start() # comment diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E12_first.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E12_first.py new file mode 100644 index 000000000..8dc65a5a4 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E12_first.py @@ -0,0 +1,78 @@ +abc = "E121", ( + #: E121:2 + "dent") +abc = "E122", ( + #: E121:0 +"dent") +my_list = [ + 1, 2, 3, + 4, 5, 6, + #: E123 + ] +abc = "E124", ("visual", + "indent_two" + #: E124:14 + ) +abc = "E124", ("visual", + "indent_five" + #: E124:0 +) +a = (123, + #: E124:0 +) +#: E129+1:4 +if (row < 0 or self.moduleCount <= row or + col < 0 or self.moduleCount <= col): + raise Exception("%s,%s - %s" % (row, col, self.moduleCount)) + +abc = "E126", ( + #: E126:12 + "dent") +abc = "E126", ( + #: E126:8 + "dent") +abc = "E127", ("over-", + #: E127:18 + "over-indent") +abc = "E128", ("visual", + #: E128:4 + "hanging") +abc = "E128", ("under-", + #: E128:14 + "under-indent") + + +my_list = [ + 1, 2, 3, + 4, 5, 6, + #: E123:5 + ] +result = { + #: E121:3 + 'key1': 'value', + #: E121:3 + 'key2': 'value', +} +rv.update(dict.fromkeys(( + 'qualif_nr', 'reasonComment_en', 'reasonComment_fr', + 'reasonComment_de', 'reasonComment_it'), + #: E128:10 + '?'), + "foo") + +abricot = 3 + \ + 4 + \ + 5 + 6 +abc = "hello", ( + + "there", + #: E126:5 + # "john", + "dude") +part = set_mimetype(( + a.get('mime_type', 'text')), + 'default') +part = set_mimetype(( + a.get('mime_type', 'text')), + #: E127:21 + 'default') diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E12_not_first.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E12_not_first.py new file mode 100644 index 000000000..fc3b5f933 --- /dev/null +++ 
b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E12_not_first.py @@ -0,0 +1,356 @@ +# The issue numbers described in this file are part of the pycodestyle tracker +# and not of parso. +# Originally there were no issues in here, I (dave) added the ones that were +# necessary and IMO useful. +if ( + x == ( + 3 + ) or + y == 4): + pass + +y = x == 2 \ + or x == 3 + +#: E129+1:4 +if x == 2 \ + or y > 1 \ + or x == 3: + pass + +if x == 2 \ + or y > 1 \ + or x == 3: + pass + + +if (foo == bar and + baz == frop): + pass + +#: E129+1:4 E129+2:4 E123+3 +if ( + foo == bar and + baz == frop +): + pass + +if ( + foo == bar and + baz == frop + #: E129:4 + ): + pass + +a = ( +) + +a = (123, + ) + + +if start[1] > end_col and not ( + over_indent == 4 and indent_next): + assert (0, "E121 continuation line over-" + "indented for visual indent") + + +abc = "OK", ("visual", + "indent") + +abc = "Okay", ("visual", + "indent_three" + ) + +abc = "a-ok", ( + "there", + "dude", +) + +abc = "hello", ( + "there", + "dude") + +abc = "hello", ( + + "there", + # "john", + "dude") + +abc = "hello", ( + "there", "dude") + +abc = "hello", ( + "there", "dude", +) + +# Aligned with opening delimiter +foo = long_function_name(var_one, var_two, + var_three, var_four) + +# Extra indentation is not necessary. 
+foo = long_function_name( + var_one, var_two, + var_three, var_four) + + +arm = 'AAA' \ + 'BBB' \ + 'CCC' + +bbb = 'AAA' \ + 'BBB' \ + 'CCC' + +cc = ('AAA' + 'BBB' + 'CCC') + +cc = {'text': 'AAA' + 'BBB' + 'CCC'} + +cc = dict(text='AAA' + 'BBB') + +sat = 'AAA' \ + 'BBB' \ + 'iii' \ + 'CCC' + +abricot = (3 + + 4 + + 5 + 6) + +#: E122+1:4 +abricot = 3 + \ + 4 + \ + 5 + 6 + +part = [-1, 2, 3, + 4, 5, 6] + +#: E128+1:8 +part = [-1, (2, 3, + 4, 5, 6), 7, + 8, 9, 0] + +fnct(1, 2, 3, + 4, 5, 6) + +fnct(1, 2, 3, + 4, 5, 6, + 7, 8, 9, + 10, 11) + + +def long_function_name( + var_one, var_two, var_three, + var_four): + hello(var_one) + + +if ((row < 0 or self.moduleCount <= row or + col < 0 or self.moduleCount <= col)): + raise Exception("%s,%s - %s" % (row, col, self.moduleCount)) + + +result = { + 'foo': [ + 'bar', { + 'baz': 'frop', + } + ] +} + + +foo = my.func({ + "foo": "bar", +}, "baz") + + +fooff(aaaa, + cca( + vvv, + dadd + ), fff, + ggg) + +fooff(aaaa, + abbb, + cca( + vvv, + aaa, + dadd), + "visual indentation is not a multiple of four",) + +if bar: + assert ( + start, 'E121 lines starting with a ' + 'closing bracket should be indented ' + "to match that of the opening " + "bracket's line" + ) + +# you want vertical alignment, so use a parens +if ((foo.bar("baz") and + foo.bar("frop") + )): + hello("yes") + +# also ok, but starting to look like LISP +if ((foo.bar("baz") and + foo.bar("frop"))): + hello("yes") + +#: E129+1:4 E127+2:9 +if (a == 2 or + b == "abc def ghi" + "jkl mno"): + assert True + +#: E129+1:4 +if (a == 2 or + b == """abc def ghi +jkl mno"""): + assert True + +if length > options.max_line_length: + assert options.max_line_length, \ + "E501 line too long (%d characters)" % length + + +# blub + + +asd = 'l.{line}\t{pos}\t{name}\t{text}'.format( + line=token[2][0], + pos=pos, + name=tokenize.tok_name[token[0]], + text=repr(token[1]), +) + +#: E121+1:6 E121+2:6 +hello('%-7d %s per second (%d total)' % ( + options.counters[key] / elapsed, key, + 
options.counters[key])) + + +if os.path.exists(os.path.join(path, PEP8_BIN)): + cmd = ([os.path.join(path, PEP8_BIN)] + + self._pep8_options(targetfile)) + + +fixed = (re.sub(r'\t+', ' ', target[c::-1], 1)[::-1] + + target[c + 1:]) + +fixed = ( + re.sub(r'\t+', ' ', target[c::-1], 1)[::-1] + + target[c + 1:] +) + + +if foo is None and bar is "frop" and \ + blah == 'yeah': + blah = 'yeahnah' + + +"""This is a multi-line + docstring.""" + + +if blah: + # is this actually readable? :) + multiline_literal = """ +while True: + if True: + 1 +""".lstrip() + multiline_literal = ( + """ +while True: + if True: + 1 +""".lstrip() + ) + multiline_literal = ( + """ +while True: + if True: + 1 +""" + .lstrip() + ) + + +if blah: + multiline_visual = (""" +while True: + if True: + 1 +""" + .lstrip()) + + +rv = {'aaa': 42} +rv.update(dict.fromkeys(( + #: E121:4 E121+1:4 + 'qualif_nr', 'reasonComment_en', 'reasonComment_fr', + 'reasonComment_de', 'reasonComment_it'), '?')) + +rv.update(dict.fromkeys(('qualif_nr', 'reasonComment_en', + 'reasonComment_fr', 'reasonComment_de', + 'reasonComment_it'), '?')) + +#: E128+1:10 +rv.update(dict.fromkeys(('qualif_nr', 'reasonComment_en', 'reasonComment_fr', + 'reasonComment_de', 'reasonComment_it'), '?')) + + +rv.update(dict.fromkeys( + ('qualif_nr', 'reasonComment_en', 'reasonComment_fr', + 'reasonComment_de', 'reasonComment_it'), '?' 
+ ), "foo", context={ + 'alpha': 4, 'beta': 53242234, 'gamma': 17, + }) + + +rv.update( + dict.fromkeys(( + 'qualif_nr', 'reasonComment_en', 'reasonComment_fr', + 'reasonComment_de', 'reasonComment_it'), '?'), + "foo", + context={ + 'alpha': 4, 'beta': 53242234, 'gamma': 17, + }, +) + + +event_obj.write(cursor, user_id, { + 'user': user, + 'summary': text, + 'data': data, + }) + +event_obj.write(cursor, user_id, { + 'user': user, + 'summary': text, + 'data': {'aaa': 1, 'bbb': 2}, + }) + +event_obj.write(cursor, user_id, { + 'user': user, + 'summary': text, + 'data': { + 'aaa': 1, + 'bbb': 2}, + }) + +event_obj.write(cursor, user_id, { + 'user': user, + 'summary': text, + 'data': {'timestamp': now, 'content': { + 'aaa': 1, + 'bbb': 2 + }}, + }) diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E12_not_second.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E12_not_second.py new file mode 100644 index 000000000..e7c18e0ec --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E12_not_second.py @@ -0,0 +1,294 @@ + +def qualify_by_address( + self, cr, uid, ids, context=None, + params_to_check=frozenset(QUALIF_BY_ADDRESS_PARAM)): + """ This gets called by the web server """ + + +def qualify_by_address(self, cr, uid, ids, context=None, + params_to_check=frozenset(QUALIF_BY_ADDRESS_PARAM)): + """ This gets called by the web server """ + + +_ipv4_re = re.compile('^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.' + '(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.' + '(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.' 
+ '(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$') + + +fct(""" + AAA """ + status_2_string) + + +if context: + msg = """\ +action: GET-CONFIG +payload: + ip_address: "%(ip)s" + username: "%(username)s" +""" % context + + +if context: + msg = """\ +action: \ +GET-CONFIG +""" % context + + +if context: + #: E122+2:0 + msg = """\ +action: """\ +"""GET-CONFIG +""" % context + + +def unicode2html(s): + """Convert the characters &<>'" in string s to HTML-safe sequences. + Convert newline to
too.""" + #: E127+1:28 + return unicode((s or '').replace('&', '&') + .replace('\n', '
\n')) + + +parser.add_option('--count', action='store_true', + help="print total number of errors and warnings " + "to standard error and set exit code to 1 if " + "total is not null") + +parser.add_option('--exclude', metavar='patterns', default=DEFAULT_EXCLUDE, + help="exclude files or directories which match these " + "comma separated patterns (default: %s)" % + DEFAULT_EXCLUDE) + +add_option('--count', + #: E135+1 + help="print total number of errors " + "to standard error total is not null") + +add_option('--count', + #: E135+2:11 + help="print total number of errors " + "to standard error " + "total is not null") + + +help = ("print total number of errors " + + "to standard error") + +help = "print total number of errors " \ + "to standard error" + +help = u"print total number of errors " \ + u"to standard error" + +help = b"print total number of errors " \ + b"to standard error" + +#: E122+1:5 +help = br"print total number of errors " \ + br"to standard error" + +d = dict('foo', help="exclude files or directories which match these " + #: E135:9 + "comma separated patterns (default: %s)" % DEFAULT_EXCLUDE) + +d = dict('foo', help=u"exclude files or directories which match these " + u"comma separated patterns (default: %s)" + % DEFAULT_EXCLUDE) + +#: E135+1:9 E135+2:9 +d = dict('foo', help=b"exclude files or directories which match these " + b"comma separated patterns (default: %s)" + % DEFAULT_EXCLUDE) + +d = dict('foo', help=br"exclude files or directories which match these " + br"comma separated patterns (default: %s)" % + DEFAULT_EXCLUDE) + +d = dict('foo', + help="exclude files or directories which match these " + "comma separated patterns (default: %s)" % + DEFAULT_EXCLUDE) + +d = dict('foo', + help="exclude files or directories which match these " + "comma separated patterns (default: %s, %s)" % + (DEFAULT_EXCLUDE, DEFAULT_IGNORE) + ) + +d = dict('foo', + help="exclude files or directories which match these " + "comma separated patterns (default: %s, 
%s)" % + # who knows what might happen here? + (DEFAULT_EXCLUDE, DEFAULT_IGNORE) + ) + +# parens used to allow the indenting. +troublefree_hash = { + "hash": "value", + "long": ("the quick brown fox jumps over the lazy dog before doing a " + "somersault"), + "long key that tends to happen more when you're indented": ( + "stringwithalongtoken you don't want to break" + ), +} + +# another accepted form +troublefree_hash = { + "hash": "value", + "long": "the quick brown fox jumps over the lazy dog before doing " + "a somersault", + ("long key that tends to happen more " + "when you're indented"): "stringwithalongtoken you don't want to break", +} +# confusing but accepted... don't do that +troublesome_hash = { + "hash": "value", + "long": "the quick brown fox jumps over the lazy dog before doing a " + #: E135:4 + "somersault", + "longer": + "the quick brown fox jumps over the lazy dog before doing a " + "somersaulty", + "long key that tends to happen more " + "when you're indented": "stringwithalongtoken you don't want to break", +} + +d = dict('foo', + help="exclude files or directories which match these " + "comma separated patterns (default: %s)" % + DEFAULT_EXCLUDE + ) +d = dict('foo', + help="exclude files or directories which match these " + "comma separated patterns (default: %s)" % DEFAULT_EXCLUDE, + foobar="this clearly should work, because it is at " + "the right indent level", + ) + +rv.update(dict.fromkeys( + ('qualif_nr', 'reasonComment_en', 'reasonComment_fr', + 'reasonComment_de', 'reasonComment_it'), + '?'), "foo", + context={'alpha': 4, 'beta': 53242234, 'gamma': 17}) + + +def f(): + try: + if not Debug: + hello(''' +If you would like to see debugging output, +try: %s -d5 +''' % sys.argv[0]) + + +# The try statement above was not finished. 
+#: E901 +d = { # comment + 1: 2 +} + +# issue 138 (we won't allow this in parso) +#: E126+2:9 +[ + 12, # this is a multi-line inline + # comment +] +# issue 151 +#: E122+1:3 +if a > b and \ + c > d: + moo_like_a_cow() + +my_list = [ + 1, 2, 3, + 4, 5, 6, +] + +my_list = [1, 2, 3, + 4, 5, 6, + ] + +result = some_function_that_takes_arguments( + 'a', 'b', 'c', + 'd', 'e', 'f', +) + +result = some_function_that_takes_arguments('a', 'b', 'c', + 'd', 'e', 'f', + ) + +# issue 203 +dica = { + ('abc' + 'def'): ( + 'abc'), +} + +(abcdef[0] + [1]) = ( + 'abc') + +('abc' + 'def') == ( + 'abc') + +# issue 214 +bar( + 1).zap( + 2) + +bar( + 1).zap( + 2) + +if True: + + def example_issue254(): + return [node.copy( + ( + replacement + # First, look at all the node's current children. + for child in node.children + # Replace them. + for replacement in replace(child) + ), + dict(name=token.undefined) + )] + + +def valid_example(): + return [node.copy(properties=dict( + (key, val if val is not None else token.undefined) + for key, val in node.items() + ))] + + +foo([ + 'bug' +]) + +# issue 144, finally! 
+some_hash = { + "long key that tends to happen more when you're indented": + "stringwithalongtoken you don't want to break", +} + +{ + 1: + 999999 if True + else 0, +} + + +abc = dedent( + ''' + mkdir -p ./{build}/ + mv ./build/ ./{build}/%(revision)s/ + '''.format( + build='build', + # more stuff + ) +) diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E12_second.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E12_second.py new file mode 100644 index 000000000..5488ea40e --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E12_second.py @@ -0,0 +1,195 @@ +if True: + result = some_function_that_takes_arguments( + 'a', 'b', 'c', + 'd', 'e', 'f', + #: E123:0 +) +#: E122+1 +if some_very_very_very_long_variable_name or var \ +or another_very_long_variable_name: + raise Exception() +#: E122+1 +if some_very_very_very_long_variable_name or var[0] \ +or another_very_long_variable_name: + raise Exception() +if True: + #: E122+1 + if some_very_very_very_long_variable_name or var \ + or another_very_long_variable_name: + raise Exception() +if True: + #: E122+1 + if some_very_very_very_long_variable_name or var[0] \ + or another_very_long_variable_name: + raise Exception() + +#: E901+1:8 +dictionary = [ + "is": { + # Might be a E122:4, but is not because the code is invalid Python. 
+ "nested": yes(), + }, +] +setup('', + scripts=[''], + classifiers=[ + #: E121:6 + 'Development Status :: 4 - Beta', + 'Environment :: Console', + 'Intended Audience :: Developers', + ]) + + +#: E123+2:4 E291:15 +abc = "E123", ( + "bad", "hanging", "close" + ) + +result = { + 'foo': [ + 'bar', { + 'baz': 'frop', + #: E123 + } + #: E123 + ] + #: E123 + } +result = some_function_that_takes_arguments( + 'a', 'b', 'c', + 'd', 'e', 'f', + #: E123 + ) +my_list = [1, 2, 3, + 4, 5, 6, + #: E124:0 +] +my_list = [1, 2, 3, + 4, 5, 6, + #: E124:19 + ] +#: E124+2 +result = some_function_that_takes_arguments('a', 'b', 'c', + 'd', 'e', 'f', +) +fooff(aaaa, + cca( + vvv, + dadd + ), fff, + #: E124:0 +) +fooff(aaaa, + ccaaa( + vvv, + dadd + ), + fff, + #: E124:0 +) +d = dict('foo', + help="exclude files or directories which match these " + "comma separated patterns (default: %s)" % DEFAULT_EXCLUDE + #: E124:14 + ) + +if line_removed: + self.event(cr, uid, + #: E128:8 + name="Removing the option for contract", + #: E128:8 + description="contract line has been removed", + #: E124:8 + ) + +#: E129+1:4 +if foo is None and bar is "frop" and \ + blah == 'yeah': + blah = 'yeahnah' + + +#: E129+1:4 E129+2:4 +def long_function_name( + var_one, var_two, var_three, + var_four): + hello(var_one) + + +def qualify_by_address( + #: E129:4 E129+1:4 + self, cr, uid, ids, context=None, + params_to_check=frozenset(QUALIF_BY_ADDRESS_PARAM)): + """ This gets called by the web server """ + + +#: E129+1:4 E129+2:4 +if (a == 2 or + b == "abc def ghi" + "jkl mno"): + True + +my_list = [ + 1, 2, 3, + 4, 5, 6, + #: E123:8 + ] + +abris = 3 + \ + 4 + \ + 5 + 6 + +fixed = re.sub(r'\t+', ' ', target[c::-1], 1)[::-1] + \ + target[c + 1:] + +rv.update(dict.fromkeys(( + 'qualif_nr', 'reasonComment_en', 'reasonComment_fr', + #: E121:12 + 'reasonComment_de', 'reasonComment_it'), + '?'), + #: E128:4 + "foo") +#: E126+1:8 +eat_a_dict_a_day({ + "foo": "bar", +}) +#: E129+1:4 +if ( + x == ( + 3 + #: E129:4 + ) or + y == 
4): + pass +#: E129+1:4 E121+2:8 E129+3:4 +if ( + x == ( + 3 + ) or + x == ( + # This one has correct indentation. + 3 + #: E129:4 + ) or + y == 4): + pass +troublesome_hash = { + "hash": "value", + #: E135+1:8 + "long": "the quick brown fox jumps over the lazy dog before doing a " + "somersault", +} + +# Arguments on first line forbidden when not using vertical alignment +#: E128+1:4 +foo = long_function_name(var_one, var_two, + var_three, var_four) + +#: E128+1:4 +hello('l.%s\t%s\t%s\t%r' % + (token[2][0], pos, tokenize.tok_name[token[0]], token[1])) + + +def qualify_by_address(self, cr, uid, ids, context=None, + #: E128:8 + params_to_check=frozenset(QUALIF_BY_ADDRESS_PARAM)): + """ This gets called by the web server """ diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E12_third.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E12_third.py new file mode 100644 index 000000000..26697fed7 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E12_third.py @@ -0,0 +1,116 @@ +#: E128+1 +foo(1, 2, 3, +4, 5, 6) +#: E128+1:1 +foo(1, 2, 3, + 4, 5, 6) +#: E128+1:2 +foo(1, 2, 3, + 4, 5, 6) +#: E128+1:3 +foo(1, 2, 3, + 4, 5, 6) +foo(1, 2, 3, + 4, 5, 6) +#: E127+1:5 +foo(1, 2, 3, + 4, 5, 6) +#: E127+1:6 +foo(1, 2, 3, + 4, 5, 6) +#: E127+1:7 +foo(1, 2, 3, + 4, 5, 6) +#: E127+1:8 +foo(1, 2, 3, + 4, 5, 6) +#: E127+1:9 +foo(1, 2, 3, + 4, 5, 6) +#: E127+1:10 +foo(1, 2, 3, + 4, 5, 6) +#: E127+1:11 +foo(1, 2, 3, + 4, 5, 6) +#: E127+1:12 +foo(1, 2, 3, + 4, 5, 6) +#: E127+1:13 +foo(1, 2, 3, + 4, 5, 6) +if line_removed: + #: E128+1:14 E128+2:14 + self.event(cr, uid, + name="Removing the option for contract", + description="contract line has been removed", + ) + +if line_removed: + self.event(cr, uid, + #: E127:16 + name="Removing the option for contract", + #: E127:16 + description="contract line has been removed", + #: E124:16 + ) +rv.update(d=('a', 'b', 'c'), + #: E127:13 + e=42) + +#: E135+2:17 +rv.update(d=('a' + 'b', 
'c'), + e=42, f=42 + + 42) +rv.update(d=('a' + 'b', 'c'), + e=42, f=42 + + 42) +#: E127+1:26 +input1 = {'a': {'calc': 1 + 2}, 'b': 1 + + 42} +#: E128+2:17 +rv.update(d=('a' + 'b', 'c'), + e=42, f=(42 + + 42)) + +if True: + def example_issue254(): + #: + return [node.copy( + ( + #: E121:16 E121+3:20 + replacement + # First, look at all the node's current children. + for child in node.children + for replacement in replace(child) + ), + dict(name=token.undefined) + )] +# TODO multiline docstring are currently not handled. E125+1:4? +if (""" + """): + pass + +# TODO same +for foo in """ + abc + 123 + """.strip().split(): + hello(foo) +abc = dedent( + ''' + mkdir -p ./{build}/ + mv ./build/ ./{build}/%(revision)s/ + '''.format( + #: E121:4 E121+1:4 E123+2:0 + build='build', + # more stuff +) +) +#: E701+1: E122+1 +if True:\ +hello(True) + +#: E128+1 +foobar(a +, end=' ') diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E20.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E20.py new file mode 100644 index 000000000..44986fa96 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E20.py @@ -0,0 +1,52 @@ +#: E201:5 +spam( ham[1], {eggs: 2}) +#: E201:9 +spam(ham[ 1], {eggs: 2}) +#: E201:14 +spam(ham[1], { eggs: 2}) + +# Okay +spam(ham[1], {eggs: 2}) + + +#: E202:22 +spam(ham[1], {eggs: 2} ) +#: E202:21 +spam(ham[1], {eggs: 2 }) +#: E202:10 +spam(ham[1 ], {eggs: 2}) +# Okay +spam(ham[1], {eggs: 2}) + +result = func( + arg1='some value', + arg2='another value', +) + +result = func( + arg1='some value', + arg2='another value' +) + +result = [ + item for item in items + if item > 5 +] + +#: E203:9 +if x == 4 : + foo(x, y) + x, y = y, x +if x == 4: + #: E203:12 E702:13 + a = x, y ; x, y = y, x +if x == 4: + foo(x, y) + #: E203:12 + x, y = y , x +# Okay +if x == 4: + foo(x, y) + x, y = y, x +a[b1, :1] == 3 +b = a[:, b1] diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E21.py 
b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E21.py new file mode 100644 index 000000000..f65616e8a --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E21.py @@ -0,0 +1,16 @@ +#: E211:4 +spam (1) +#: E211:4 E211:19 +dict ['key'] = list [index] +#: E211:11 +dict['key'] ['subkey'] = list[index] +# Okay +spam(1) +dict['key'] = list[index] + + +# This is not prohibited by PEP8, but avoid it. +# Dave: I think this is extremely stupid. Use the same convention everywhere. +#: E211:9 +class Foo (Bar, Baz): + pass diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E22.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E22.py new file mode 100644 index 000000000..82ff6a440 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E22.py @@ -0,0 +1,156 @@ +a = 12 + 3 +#: E221:5 E229:8 +b = 4 + 5 +#: E221:1 +x = 1 +#: E221:1 +y = 2 +long_variable = 3 +#: E221:4 +x[0] = 1 +#: E221:4 +x[1] = 2 +long_variable = 3 +#: E221:8 E229:19 +x = f(x) + 1 +y = long_variable + 2 +#: E221:8 E229:19 +z = x[0] + 3 +#: E221+2:13 +text = """ + bar + foo %s""" % rofl +# Okay +x = 1 +y = 2 +long_variable = 3 + + +#: E221:7 +a = a + 1 +b = b + 10 +#: E221:3 +x = -1 +#: E221:3 +y = -2 +long_variable = 3 +#: E221:6 +x[0] = 1 +#: E221:6 +x[1] = 2 +long_variable = 3 + + +#: E223+1:1 +foobart = 4 +a = 3 # aligned with tab + + +#: E223:4 +a += 1 +b += 1000 + + +#: E225:12 +submitted +=1 +#: E225:9 +submitted+= 1 +#: E225:3 +c =-1 +#: E229:7 +x = x /2 - 1 +#: E229:11 +c = alpha -4 +#: E229:10 +c = alpha- 4 +#: E229:8 +z = x **y +#: E229:14 +z = (x + 1) **y +#: E229:13 +z = (x + 1)** y +#: E227:14 +_1kB = _1MB >>10 +#: E227:11 +_1kB = _1MB>> 10 +#: E225:1 E225:2 E229:4 +i=i+ 1 +#: E225:1 E225:2 E229:5 +i=i +1 +#: E225:1 E225:2 +i=i+1 +#: E225:3 +i =i+1 +#: E225:1 +i= i+1 +#: E229:8 +c = (a +b)*(a - b) +#: E229:7 +c = (a+ b)*(a - b) + +z = 2//30 +c = (a+b) * (a-b) +x = x*2 - 1 +x = x/2 - 1 +# TODO whitespace should 
be the other way around according to pep8. +x = x / 2-1 + +hypot2 = x*x + y*y +c = (a + b)*(a - b) + + +def halves(n): + return (i//2 for i in range(n)) + + +#: E227:11 E227:13 +_1kB = _1MB>>10 +#: E227:11 E227:13 +_1MB = _1kB<<10 +#: E227:5 E227:6 +a = b|c +#: E227:5 E227:6 +b = c&a +#: E227:5 E227:6 +c = b^a +#: E228:5 E228:6 +a = b%c +#: E228:9 E228:10 +msg = fmt%(errno, errmsg) +#: E228:25 E228:26 +msg = "Error %d occurred"%errno + +#: E228:7 +a = b %c +a = b % c + +# Okay +i = i + 1 +submitted += 1 +x = x * 2 - 1 +hypot2 = x * x + y * y +c = (a + b) * (a - b) +_1MiB = 2 ** 20 +_1TiB = 2**30 +foo(bar, key='word', *args, **kwargs) +baz(**kwargs) +negative = -1 +spam(-1) +-negative +func1(lambda *args, **kw: (args, kw)) +func2(lambda a, b=h[:], c=0: (a, b, c)) +if not -5 < x < +5: + #: E227:12 + print >>sys.stderr, "x is out of range." +print >> sys.stdout, "x is an integer." +x = x / 2 - 1 + + +def squares(n): + return (i**2 for i in range(n)) + + +ENG_PREFIXES = { + -6: "\u03bc", # Greek letter mu + -3: "m", +} diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E23.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E23.py new file mode 100644 index 000000000..47f1447a2 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E23.py @@ -0,0 +1,16 @@ +#: E231:7 +a = (1,2) +#: E231:5 +a[b1,:] +#: E231:10 +a = [{'a':''}] +# Okay +a = (4,) +#: E202:7 +b = (5, ) +c = {'text': text[5:]} + +result = { + 'key1': 'value', + 'key2': 'value', +} diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E25.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E25.py new file mode 100644 index 000000000..8cf53147f --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E25.py @@ -0,0 +1,36 @@ +#: E251:11 E251:13 +def foo(bar = False): + '''Test function with an error in declaration''' + pass + + +#: E251:8 +foo(bar= True) +#: E251:7 +foo(bar =True) +#: E251:7 E251:9 +foo(bar = 
True) +#: E251:13 +y = bar(root= "sdasd") +parser.add_argument('--long-option', + #: E135+1:20 + default= + "/rather/long/filesystem/path/here/blah/blah/blah") +parser.add_argument('--long-option', + default= + "/rather/long/filesystem") +# TODO this looks so stupid. +parser.add_argument('--long-option', default + ="/rather/long/filesystem/path/here/blah/blah/blah") +#: E251+2:7 E251+2:9 +foo(True, + baz=(1, 2), + biz = 'foo' + ) +# Okay +foo(bar=(1 == 1)) +foo(bar=(1 != 1)) +foo(bar=(1 >= 1)) +foo(bar=(1 <= 1)) +(options, args) = parser.parse_args() +d[type(None)] = _deepcopy_atomic diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E26.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E26.py new file mode 100644 index 000000000..4774852a0 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E26.py @@ -0,0 +1,78 @@ +#: E261:4 +pass # an inline comment +#: E261:4 +pass# an inline comment + +# Okay +pass # an inline comment +pass # an inline comment +#: E262:11 +x = x + 1 #Increment x +#: E262:11 +x = x + 1 # Increment x +#: E262:11 +x = y + 1 #: Increment x +#: E265 +#Block comment +a = 1 +#: E265+1 +m = 42 +#! This is important +mx = 42 - 42 + +# Comment without anything is not an issue. +# +# However if there are comments at the end without anything it obviously +# doesn't make too much sense. 
+#: E262:9 +foo = 1 # + + +#: E266+2:4 E266+5:4 +def how_it_feel(r): + + ### This is a variable ### + a = 42 + + ### Of course it is unused + return + + +#: E266 E266+1 +##if DEBUG: +## logging.error() +#: E266 +######################################### + +# Not at the beginning of a file +#: E265 +#!/usr/bin/env python + +# Okay + +pass # an inline comment +x = x + 1 # Increment x +y = y + 1 #: Increment x + +# Block comment +a = 1 + +# Block comment1 + +# Block comment2 +aaa = 1 + + +# example of docstring (not parsed) +def oof(): + """ + #foo not parsed + """ + + ########################################################################### + # A SEPARATOR # + ########################################################################### + + # ####################################################################### # + # ########################## another separator ########################## # + # ####################################################################### # diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E27.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E27.py new file mode 100644 index 000000000..9149f0aa5 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E27.py @@ -0,0 +1,49 @@ +# Okay +from u import (a, b) +from v import c, d +#: E221:13 +from w import (e, f) +#: E275:13 +from w import(e, f) +#: E275:29 +from importable.module import(e, f) +try: + #: E275:33 + from importable.module import(e, f) +except ImportError: + pass +# Okay +True and False +#: E221:8 +True and False +#: E221:4 +True and False +#: E221:2 +if 1: + pass +# Syntax Error, no indentation +#: E903+1 +if 1: +pass +#: E223:8 +True and False +#: E223:4 E223:9 +True and False +#: E221:5 +a and b +#: E221:5 +1 and b +#: E221:5 +a and 2 +#: E221:1 E221:6 +1 and b +#: E221:1 E221:6 +a and 2 +#: E221:4 +this and False +#: E223:5 +a and b +#: E223:1 +a and b +#: E223:4 E223:9 +this and False diff --git 
a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E29.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E29.py new file mode 100644 index 000000000..cebbb7bba --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E29.py @@ -0,0 +1,15 @@ +# Okay +# 情 +#: W291:5 +print + + +#: W291+1 +class Foo(object): + + bang = 12 + + +#: W291+1:34 +'''multiline +string with trailing whitespace''' diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E30.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E30.py new file mode 100644 index 000000000..31e241cd4 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E30.py @@ -0,0 +1,177 @@ +#: E301+4 +class X: + + def a(): + pass + def b(): + pass + + +#: E301+5 +class X: + + def a(): + pass + # comment + def b(): + pass + + +# -*- coding: utf-8 -*- +def a(): + pass + + +#: E302+1:0 +"""Main module.""" +def _main(): + pass + + +#: E302+1:0 +foo = 1 +def get_sys_path(): + return sys.path + + +#: E302+3:0 +def a(): + pass + +def b(): + pass + + +#: E302+5:0 +def a(): + pass + +# comment + +def b(): + pass + + +#: E303+3:0 +print + + + +#: E303+3:0 E303+4:0 +print + + + + +print +#: E303+3:0 +print + + + +# comment + +print + + +#: E303+3 E303+6 +def a(): + print + + + # comment + + + # another comment + + print + + +#: E302+2 +a = 3 +#: E304+1 +@decorator + +def function(): + pass + + +#: E303+3 +# something + + + +"""This class docstring comes on line 5. +It gives error E303: too many blank lines (3) +""" + + +#: E302+6 +def a(): + print + + # comment + + # another comment +a() + + +#: E302+7 +def a(): + print + + # comment + + # another comment + +try: + a() +except Exception: + pass + + +#: E302+4 +def a(): + print + +# Two spaces before comments, too. 
+if a(): + a() + + +#: E301+2 +def a(): + x = 1 + def b(): + pass + + +#: E301+2 E301+4 +def a(): + x = 2 + def b(): + x = 1 + def c(): + pass + + +#: E301+2 E301+4 E301+5 +def a(): + x = 1 + class C: + pass + x = 2 + def b(): + pass + + +#: E302+7 +# Example from https://github.com/PyCQA/pycodestyle/issues/400 +foo = 2 + + +def main(): + blah, blah + +if __name__ == '__main__': + main() diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E30not.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E30not.py new file mode 100644 index 000000000..c0c005ccd --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E30not.py @@ -0,0 +1,175 @@ +# Okay +class X: + pass +# Okay + + +def foo(): + pass + + +# Okay +# -*- coding: utf-8 -*- +class X: + pass + + +# Okay +# -*- coding: utf-8 -*- +def foo(): + pass + + +# Okay +class X: + + def a(): + pass + + # comment + def b(): + pass + + # This is a + # ... multi-line comment + + def c(): + pass + + +# This is a +# ... multi-line comment + +@some_decorator +class Y: + + def a(): + pass + + # comment + + def b(): + pass + + @property + def c(): + pass + + +try: + from nonexistent import Bar +except ImportError: + class Bar(object): + """This is a Bar replacement""" + + +def with_feature(f): + """Some decorator""" + wrapper = f + if has_this_feature(f): + def wrapper(*args): + call_feature(args[0]) + return f(*args) + return wrapper + + +try: + next +except NameError: + def next(iterator, default): + for item in iterator: + return item + return default + + +def a(): + pass + + +class Foo(): + """Class Foo""" + + def b(): + + pass + + +# comment +def c(): + pass + + +# comment + + +def d(): + pass + +# This is a +# ... multi-line comment + +# And this one is +# ... a second paragraph +# ... 
which spans on 3 lines + + +# Function `e` is below +# NOTE: Hey this is a testcase + +def e(): + pass + + +def a(): + print + + # comment + + print + + print + +# Comment 1 + +# Comment 2 + + +# Comment 3 + +def b(): + + pass + + +# Okay +def foo(): + pass + + +def bar(): + pass + + +class Foo(object): + pass + + +class Bar(object): + pass + + +if __name__ == '__main__': + foo() +# Okay +classification_errors = None +# Okay +defined_properly = True +# Okay +defaults = {} +defaults.update({}) + + +# Okay +def foo(x): + classification = x + definitely = not classification diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E40.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E40.py new file mode 100644 index 000000000..93a2ccf38 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E40.py @@ -0,0 +1,39 @@ +#: E401:7 +import os, sys +# Okay +import os +import sys + +from subprocess import Popen, PIPE + +from myclass import MyClass +from foo.bar.yourclass import YourClass + +import myclass +import foo.bar.yourclass +# All Okay from here until the definition of VERSION +__all__ = ['abc'] + +import foo +__version__ = "42" + +import foo +__author__ = "Simon Gomizelj" + +import foo +try: + import foo +except ImportError: + pass +else: + hello('imported foo') +finally: + hello('made attempt to import foo') + +import bar +VERSION = '1.2.3' + +#: E402 +import foo +#: E402 +import foo diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E50.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E50.py new file mode 100644 index 000000000..67fd55833 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E50.py @@ -0,0 +1,126 @@ +#: E501:4 +a = '12345678901234567890123456789012345678901234567890123456789012345678901234567890' +#: E501:80 +a = '1234567890123456789012345678901234567890123456789012345678901234567890' or \ + 6 +#: E501+1:80 +a = 7 or \ + 
'1234567890123456789012345678901234567890123456789012345678901234567890' or \ + 6 +#: E501+1:80 E501+2:80 +a = 7 or \ + '1234567890123456789012345678901234567890123456789012345678901234567890' or \ + '1234567890123456789012345678901234567890123456789012345678901234567890' or \ + 6 +#: E501:78 +a = '1234567890123456789012345678901234567890123456789012345678901234567890' # \ +#: E502:78 +a = ('123456789012345678901234567890123456789012345678901234567890123456789' \ + '01234567890') +#: E502+1:11 +a = ('AAA \ + BBB' \ + 'CCC') +#: E502:38 +if (foo is None and bar is "e000" and \ + blah == 'yeah'): + blah = 'yeahnah' +# +# Okay +a = ('AAA' + 'BBB') + +a = ('AAA \ + BBB' + 'CCC') + +a = 'AAA' \ + 'BBB' \ + 'CCC' + +a = ('AAA\ +BBBBBBBBB\ +CCCCCCCCC\ +DDDDDDDDD') +# +# Okay +if aaa: + pass +elif bbb or \ + ccc: + pass + +ddd = \ + ccc + +('\ + ' + ' \ +') +(''' + ''' + ' \ +') +#: E501:67 E225:21 E225:22 +very_long_identifiers=and_terrible_whitespace_habits(are_no_excuse+for_long_lines) +# +# TODO Long multiline strings are not handled. E501? +'''multiline string +with a long long long long long long long long long long long long long long long long line +''' +#: E501 +'''same thing, but this time without a terminal newline in the string +long long long long long long long long long long long long long long long long line''' +# +# issue 224 (unavoidable long lines in docstrings) +# Okay +""" +I'm some great documentation. Because I'm some great documentation, I'm +going to give you a reference to some valuable information about some API +that I'm calling: + + http://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx +""" +#: E501 +""" +longnospaceslongnospaceslongnospaceslongnospaceslongnospaceslongnospaceslongnospaceslongnospaces""" + + +# Regression test for #622 +def foo(): + """Lorem ipsum dolor sit amet, consectetur adipiscing elit. 
Duis pulvinar vitae + """ + + +# Okay +""" +This + almost_empty_line +""" + +""" +This + almost_empty_line +""" +# A basic comment +#: E501 +# with a long long long long long long long long long long long long long long long long line + +# +# Okay +# I'm some great comment. Because I'm so great, I'm going to give you a +# reference to some valuable information about some API that I'm calling: +# +# http://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx + +x = 3 + +# longnospaceslongnospaceslongnospaceslongnospaceslongnospaceslongnospaceslongnospaceslongnospaces + +# +# Okay +# This +# almost_empty_line + +# +#: E501+1 +# This +# almost_empty_line diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E70.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E70.py new file mode 100644 index 000000000..be11fb1de --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E70.py @@ -0,0 +1,25 @@ +#: E701:6 +if a: a = False +#: E701:41 +if not header or header[:6] != 'bytes=': pass +#: E702:9 +a = False; b = True +#: E702:16 E402 +import bdist_egg; bdist_egg.write_safety_flag(cmd.egg_info, safe) +#: E703:12 E402 +import shlex; +#: E702:8 E703:22 +del a[:]; a.append(42); + + +#: E704:10 +def f(x): return 2 + + +#: E704:10 +def f(x): return 2 * x + + +while all is round: + #: E704:14 + def f(x): return 2 * x diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E71.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E71.py new file mode 100644 index 000000000..109dcd6c7 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E71.py @@ -0,0 +1,93 @@ +#: E711:7 +if res == None: + pass +#: E711:7 +if res != None: + pass +#: E711:8 +if None == res: + pass +#: E711:8 +if None != res: + pass +#: E711:10 +if res[1] == None: + pass +#: E711:10 +if res[1] != None: + pass +#: E711:8 +if None != res[1]: + pass +#: E711:8 +if None == res[1]: + pass + +# +#: E712:7 
+if res == True: + pass +#: E712:7 +if res != False: + pass +#: E712:8 +if True != res: + pass +#: E712:9 +if False == res: + pass +#: E712:10 +if res[1] == True: + pass +#: E712:10 +if res[1] != False: + pass + +if x is False: + pass + +# +#: E713:9 +if not X in Y: + pass +#: E713:11 +if not X.B in Y: + pass +#: E713:9 +if not X in Y and Z == "zero": + pass +#: E713:24 +if X == "zero" or not Y in Z: + pass + +# +#: E714:9 +if not X is Y: + pass +#: E714:11 +if not X.B is Y: + pass + +# +# Okay +if x not in y: + pass + +if not (X in Y or X is Z): + pass + +if not (X in Y): + pass + +if x is not y: + pass + +if TrueElement.get_element(True) == TrueElement.get_element(False): + pass + +if (True) == TrueElement or x == TrueElement: + pass + +assert (not foo) in bar +assert {'x': not foo} in bar +assert [42, not foo] in bar diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E72.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E72.py new file mode 100644 index 000000000..2e9ef9151 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E72.py @@ -0,0 +1,79 @@ +#: E721:3 +if type(res) == type(42): + pass +#: E721:3 +if type(res) != type(""): + pass + +import types + +if res == types.IntType: + pass + +import types + +#: E721:3 +if type(res) is not types.ListType: + pass +#: E721:7 E721:35 +assert type(res) == type(False) or type(res) == type(None) +#: E721:7 +assert type(res) == type([]) +#: E721:7 +assert type(res) == type(()) +#: E721:7 +assert type(res) == type((0,)) +#: E721:7 +assert type(res) == type((0)) +#: E721:7 +assert type(res) != type((1,)) +#: E721:7 +assert type(res) is type((1,)) +#: E721:7 +assert type(res) is not type((1,)) + +# Okay +#: E402 +import types + +if isinstance(res, int): + pass +if isinstance(res, str): + pass +if isinstance(res, types.MethodType): + pass + +#: E721:3 E721:25 +if type(a) != type(b) or type(a) == type(ccc): + pass +#: E721 +type(a) != type(b) +#: E721 +1 != type(b) +#: 
E721 +type(b) != 1 +1 != 1 + +try: + pass +#: E722 +except: + pass +try: + pass +except Exception: + pass +#: E722 +except: + pass +# Okay +fake_code = """" +try: + do_something() +except: + pass +""" +try: + pass +except Exception: + pass diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E73.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E73.py new file mode 100644 index 000000000..77e2e9043 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E73.py @@ -0,0 +1,16 @@ +#: E731:4 +f = lambda x: 2 * x +while False: + #: E731:10 + foo = lambda y, z: 2 * x +# Okay +f = object() +f.method = lambda: 'Method' + +f = {} +f['a'] = lambda x: x ** 2 + +f = [] +f.append(lambda x: x ** 2) + +lambda: 'no-op' diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/LICENSE b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/LICENSE new file mode 100644 index 000000000..142a508a6 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/LICENSE @@ -0,0 +1,29 @@ +Copyright © 2006-2009 Johann C. Rocholl +Copyright © 2009-2014 Florent Xicluna +Copyright © 2014-2016 Ian Lee +Copyright © 2017-???? Dave Halter + +Dave: The files in this folder were ported from pydocstyle and some +modifications where made. + +Licensed under the terms of the Expat License + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation files +(the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/allowed_syntax.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/allowed_syntax.py new file mode 100644 index 000000000..9cccf619c --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/allowed_syntax.py @@ -0,0 +1,163 @@ +""" +Some syntax errors are a bit complicated and need exact checking. Here we +gather some of the potentially dangerous ones. +""" + +from __future__ import division + +# With a dot it's not a future import anymore. +from .__future__ import absolute_import + +'' '' +''r''u'' +b'' BR'' + + +for x in [1]: + break + continue + +try: + pass +except ZeroDivisionError: + pass + #: E722:0 +except: + pass + +try: + pass + #: E722:0 E901:0 +except: + pass +except ZeroDivisionError: + pass + + +r'\n' +r'\x' +b'\n' + + +a = 3 + + +def x(b=a): + global a + + +def x(*args, c=2, d): + pass + + +def x(*, c=2, d): + pass + + +def x(a, b=1, *args, c=2, d): + pass + + +def x(a, b=1, *, c=2, d): + pass + + +lambda *args, c=2, d: (c, d) +lambda *, c=2, d: (c, d) +lambda a, b=1, *args, c=2, d: (c, d) +lambda a, b=1, *, c=2, d: (c, d) + + +*foo, a = (1,) +*foo[0], a = (1,) +*[], a = (1,) + + +async def foo(): + await bar() + #: E901 + yield from [] + return + #: E901 + return '' + + +# With decorator it's a different statement. 
+@bla +async def foo(): + await bar() + #: E901 + yield from [] + return + #: E901 + return '' + + +foo: int = 4 +(foo): int = 3 +((foo)): int = 3 +foo.bar: int +foo[3]: int + + +def glob(): + global x + y: foo = x + + +def c(): + a = 3 + + def d(): + class X(): + nonlocal a + + +def x(): + a = 3 + + def y(): + nonlocal a + + +def x(): + def y(): + nonlocal a + + a = 3 + + +def x(): + a = 3 + + def y(): + class z(): + nonlocal a + + +def x(a): + def y(): + nonlocal a + + +def x(a, b): + def y(): + nonlocal b + nonlocal a + + +def x(a): + def y(): + def z(): + nonlocal a + + +def x(): + def y(a): + def z(): + nonlocal a + + +a = *args, *args +error[(*args, *args)] = 3 +*args, *args diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/latin-1.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/latin-1.py new file mode 100644 index 000000000..8328cfba9 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/latin-1.py @@ -0,0 +1,6 @@ +# -*- coding: latin-1 -*- +# Test non-UTF8 encoding +latin1 = ('' + '') + +c = ("w") diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/python.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/python.py new file mode 100644 index 000000000..566e90360 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/python.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python3 +from typing import ClassVar, List + +print(1, 2) + + +# Annotated function (Issue #29) +def foo(x: int) -> int: + return x + 1 + + +# Annotated variables #575 +CONST: int = 42 + + +class Class: + cls_var: ClassVar[str] + + def m(self): + xs: List[int] = [] + + +# True and False are keywords in Python 3 and therefore need a space. 
+#: E275:13 E275:14 +norman = True+False + + +#: E302+3:0 +def a(): + pass + +async def b(): + pass + + +# Okay +async def add(a: int = 0, b: int = 0) -> int: + return a + b + + +# Previously E251 four times +#: E221:5 +async def add(a: int = 0, b: int = 0) -> int: + return a + b + + +# Previously just E272+1:5 E272+4:5 +#: E302+3 E221:5 E221+3:5 +async def x(): + pass + +async def x(y: int = 1): + pass + + +#: E704:16 +async def f(x): return 2 + + +a[b1, :] == a[b1, ...] + + +# Annotated Function Definitions +# Okay +def munge(input: AnyStr, sep: AnyStr = None, limit=1000, + extra: Union[str, dict] = None) -> AnyStr: + pass + + +#: E225:24 E225:26 +def x(b: tuple = (1, 2))->int: + return a + b + + +#: E252:11 E252:12 E231:8 +def b(a:int=1): + pass + + +if alpha[:-i]: + *a, b = (1, 2, 3) + + +# Named only arguments +def foo(*, asdf): + pass + + +def foo2(bar, *, asdf=2): + pass diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/utf-8-bom.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/utf-8-bom.py new file mode 100644 index 000000000..9c065c949 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/utf-8-bom.py @@ -0,0 +1,6 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +hello = 'こんにちわ' + +# EOF diff --git a/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/utf-8.py b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/utf-8.py new file mode 100644 index 000000000..73ea9a282 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/utf-8.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- + +# Some random text with multi-byte characters (utf-8 encoded) +# +# Εδώ μάτσο κειμένων τη, τρόπο πιθανό διευθυντές ώρα μη. Νέων απλό παράγει ροή +# κι, το επί δεδομένη καθορίζουν. Πάντως ζητήσεις περιβάλλοντος ένα με, τη +# ξέχασε αρπάζεις φαινόμενο όλη. Τρέξει εσφαλμένη χρησιμοποίησέ νέα τι. Θα όρο +# πετάνε φακέλους, άρα με διακοπής λαμβάνουν εφαμοργής. Λες κι μειώσει +# καθυστερεί. 
+ +# 79 narrow chars +# 01 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 [79] + +# 78 narrow chars (Na) + 1 wide char (W) +# 01 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8情 + +# 3 narrow chars (Na) + 40 wide chars (W) +# 情 情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情 + +# 3 narrow chars (Na) + 76 wide chars (W) +# 情 情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情 + +# +# 80 narrow chars (Na) +#: E501 +# 01 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 [80] +# +# 78 narrow chars (Na) + 2 wide char (W) +#: E501 +# 01 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8情情 +# +# 3 narrow chars (Na) + 77 wide chars (W) +#: E501 +# 情 情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情情 +# diff --git a/bundle/jedi-vim/pythonx/parso/test/test_cache.py b/bundle/jedi-vim/pythonx/parso/test/test_cache.py new file mode 100644 index 000000000..398e754a4 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/test_cache.py @@ -0,0 +1,193 @@ +""" +Test all things related to the ``jedi.cache`` module. +""" + +import os +import pytest +import time +from pathlib import Path + +from parso.cache import (_CACHED_FILE_MAXIMUM_SURVIVAL, _VERSION_TAG, + _get_cache_clear_lock_path, _get_hashed_path, + _load_from_file_system, _NodeCacheItem, + _remove_cache_and_update_lock, _save_to_file_system, + load_module, parser_cache, try_to_save_module) +from parso._compatibility import is_pypy +from parso import load_grammar +from parso import cache +from parso import file_io +from parso import parse + +skip_pypy = pytest.mark.skipif( + is_pypy, + reason="pickling in pypy is slow, since we don't pickle," + "we never go into path of auto-collecting garbage" +) + + +@pytest.fixture() +def isolated_parso_cache(monkeypatch, tmpdir): + """Set `parso.cache._default_cache_path` to a temporary directory + during the test. 
""" + cache_path = Path(str(tmpdir), "__parso_cache") + monkeypatch.setattr(cache, '_default_cache_path', cache_path) + return cache_path + + +def test_modulepickling_change_cache_dir(tmpdir): + """ + ParserPickling should not save old cache when cache_directory is changed. + + See: `#168 `_ + """ + dir_1 = Path(str(tmpdir.mkdir('first'))) + dir_2 = Path(str(tmpdir.mkdir('second'))) + + item_1 = _NodeCacheItem('bla', []) + item_2 = _NodeCacheItem('bla', []) + path_1 = Path('fake path 1') + path_2 = Path('fake path 2') + + hashed_grammar = load_grammar()._hashed + _save_to_file_system(hashed_grammar, path_1, item_1, cache_path=dir_1) + parser_cache.clear() + cached = load_stored_item(hashed_grammar, path_1, item_1, cache_path=dir_1) + assert cached == item_1.node + + _save_to_file_system(hashed_grammar, path_2, item_2, cache_path=dir_2) + cached = load_stored_item(hashed_grammar, path_1, item_1, cache_path=dir_2) + assert cached is None + + +def load_stored_item(hashed_grammar, path, item, cache_path): + """Load `item` stored at `path` in `cache`.""" + item = _load_from_file_system(hashed_grammar, path, item.change_time - 1, cache_path) + return item + + +@pytest.mark.usefixtures("isolated_parso_cache") +def test_modulepickling_simulate_deleted_cache(tmpdir): + """ + Tests loading from a cache file after it is deleted. + According to macOS `dev docs`__, + + Note that the system may delete the Caches/ directory to free up disk + space, so your app must be able to re-create or download these files as + needed. + + It is possible that other supported platforms treat cache files the same + way. 
+ + __ https://developer.apple.com/library/content/documentation/FileManagement/Conceptual/FileSystemProgrammingGuide/FileSystemOverview/FileSystemOverview.html + """ # noqa + grammar = load_grammar() + module = 'fake parser' + + # Create the file + path = Path(str(tmpdir.dirname), 'some_path') + with open(path, 'w'): + pass + io = file_io.FileIO(path) + + try_to_save_module(grammar._hashed, io, module, lines=[]) + assert load_module(grammar._hashed, io) == module + + os.unlink(_get_hashed_path(grammar._hashed, path)) + parser_cache.clear() + + cached2 = load_module(grammar._hashed, io) + assert cached2 is None + + +def test_cache_limit(): + def cache_size(): + return sum(len(v) for v in parser_cache.values()) + + try: + parser_cache.clear() + future_node_cache_item = _NodeCacheItem('bla', [], change_time=time.time() + 10e6) + old_node_cache_item = _NodeCacheItem('bla', [], change_time=time.time() - 10e4) + parser_cache['some_hash_old'] = { + '/path/%s' % i: old_node_cache_item for i in range(300) + } + parser_cache['some_hash_new'] = { + '/path/%s' % i: future_node_cache_item for i in range(300) + } + assert cache_size() == 600 + parse('somecode', cache=True, path='/path/somepath') + assert cache_size() == 301 + finally: + parser_cache.clear() + + +class _FixedTimeFileIO(file_io.KnownContentFileIO): + def __init__(self, path, content, last_modified): + super().__init__(path, content) + self._last_modified = last_modified + + def get_last_modified(self): + return self._last_modified + + +@pytest.mark.parametrize('diff_cache', [False, True]) +@pytest.mark.parametrize('use_file_io', [False, True]) +def test_cache_last_used_update(diff_cache, use_file_io): + p = Path('/path/last-used') + parser_cache.clear() # Clear, because then it's easier to find stuff. 
+ parse('somecode', cache=True, path=p) + node_cache_item = next(iter(parser_cache.values()))[p] + now = time.time() + assert node_cache_item.last_used <= now + + if use_file_io: + f = _FixedTimeFileIO(p, 'code', node_cache_item.last_used - 10) + parse(file_io=f, cache=True, diff_cache=diff_cache) + else: + parse('somecode2', cache=True, path=p, diff_cache=diff_cache) + + node_cache_item = next(iter(parser_cache.values()))[p] + assert now <= node_cache_item.last_used <= time.time() + + +@skip_pypy +def test_inactive_cache(tmpdir, isolated_parso_cache): + parser_cache.clear() + test_subjects = "abcdef" + for path in test_subjects: + parse('somecode', cache=True, path=os.path.join(str(tmpdir), path)) + raw_cache_path = isolated_parso_cache.joinpath(_VERSION_TAG) + assert raw_cache_path.exists() + dir_names = os.listdir(raw_cache_path) + a_while_ago = time.time() - _CACHED_FILE_MAXIMUM_SURVIVAL + old_paths = set() + for dir_name in dir_names[:len(test_subjects) // 2]: # make certain number of paths old + os.utime(raw_cache_path.joinpath(dir_name), (a_while_ago, a_while_ago)) + old_paths.add(dir_name) + # nothing should be cleared while the lock is on + assert _get_cache_clear_lock_path().exists() + _remove_cache_and_update_lock() # it shouldn't clear anything + assert len(os.listdir(raw_cache_path)) == len(test_subjects) + assert old_paths.issubset(os.listdir(raw_cache_path)) + + os.utime(_get_cache_clear_lock_path(), (a_while_ago, a_while_ago)) + _remove_cache_and_update_lock() + assert len(os.listdir(raw_cache_path)) == len(test_subjects) // 2 + assert not old_paths.intersection(os.listdir(raw_cache_path)) + + +@skip_pypy +def test_permission_error(monkeypatch): + def save(*args, **kwargs): + nonlocal was_called + was_called = True + raise PermissionError + + was_called = False + + monkeypatch.setattr(cache, '_save_to_file_system', save) + try: + with pytest.warns(Warning): + parse(path=__file__, cache=True, diff_cache=True) + assert was_called + finally: + 
parser_cache.clear() diff --git a/bundle/jedi-vim/pythonx/parso/test/test_diff_parser.py b/bundle/jedi-vim/pythonx/parso/test/test_diff_parser.py new file mode 100644 index 000000000..222236e7e --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/test_diff_parser.py @@ -0,0 +1,1746 @@ +# -*- coding: utf-8 -*- +from textwrap import dedent +import logging + +import pytest + +from parso.utils import split_lines +from parso import cache +from parso import load_grammar +from parso.python.diff import DiffParser, _assert_valid_graph, _assert_nodes_are_equal +from parso import parse + +ANY = object() + + +def test_simple(): + """ + The diff parser reuses modules. So check for that. + """ + grammar = load_grammar() + module_a = grammar.parse('a', diff_cache=True) + assert grammar.parse('b', diff_cache=True) == module_a + + +def _check_error_leaves_nodes(node): + if node.type in ('error_leaf', 'error_node'): + return node + + try: + children = node.children + except AttributeError: + pass + else: + for child in children: + x_node = _check_error_leaves_nodes(child) + if x_node is not None: + return x_node + return None + + +class Differ: + grammar = load_grammar() + + def initialize(self, code): + logging.debug('differ: initialize') + try: + del cache.parser_cache[self.grammar._hashed][None] + except KeyError: + pass + + self.lines = split_lines(code, keepends=True) + self.module = parse(code, diff_cache=True, cache=True) + assert code == self.module.get_code() + _assert_valid_graph(self.module) + return self.module + + def parse(self, code, copies=0, parsers=0, expect_error_leaves=False): + logging.debug('differ: parse copies=%s parsers=%s', copies, parsers) + lines = split_lines(code, keepends=True) + diff_parser = DiffParser( + self.grammar._pgen_grammar, + self.grammar._tokenizer, + self.module, + ) + new_module = diff_parser.update(self.lines, lines) + self.lines = lines + assert code == new_module.get_code() + + _assert_valid_graph(new_module) + + 
without_diff_parser_module = parse(code) + _assert_nodes_are_equal(new_module, without_diff_parser_module) + + error_node = _check_error_leaves_nodes(new_module) + assert expect_error_leaves == (error_node is not None), error_node + if parsers is not ANY: + assert diff_parser._parser_count == parsers + if copies is not ANY: + assert diff_parser._copy_count == copies + return new_module + + +@pytest.fixture() +def differ(): + return Differ() + + +def test_change_and_undo(differ): + func_before = 'def func():\n pass\n' + # Parse the function and a. + differ.initialize(func_before + 'a') + # Parse just b. + differ.parse(func_before + 'b', copies=1, parsers=2) + # b has changed to a again, so parse that. + differ.parse(func_before + 'a', copies=1, parsers=2) + # Same as before parsers should not be used. Just a simple copy. + differ.parse(func_before + 'a', copies=1) + + # Now that we have a newline at the end, everything is easier in Python + # syntax, we can parse once and then get a copy. + differ.parse(func_before + 'a\n', copies=1, parsers=2) + differ.parse(func_before + 'a\n', copies=1) + + # Getting rid of an old parser: Still no parsers used. + differ.parse('a\n', copies=1) + # Now the file has completely changed and we need to parse. + differ.parse('b\n', parsers=1) + # And again. 
+ differ.parse('a\n', parsers=1) + + +def test_positions(differ): + func_before = 'class A:\n pass\n' + m = differ.initialize(func_before + 'a') + assert m.start_pos == (1, 0) + assert m.end_pos == (3, 1) + + m = differ.parse('a', copies=1) + assert m.start_pos == (1, 0) + assert m.end_pos == (1, 1) + + m = differ.parse('a\n\n', parsers=1) + assert m.end_pos == (3, 0) + m = differ.parse('a\n\n ', copies=1, parsers=2) + assert m.end_pos == (3, 1) + m = differ.parse('a ', parsers=1) + assert m.end_pos == (1, 2) + + +def test_if_simple(differ): + src = dedent('''\ + if 1: + a = 3 + ''') + else_ = "else:\n a = ''\n" + + differ.initialize(src + 'a') + differ.parse(src + else_ + "a", copies=0, parsers=1) + + differ.parse(else_, parsers=2, expect_error_leaves=True) + differ.parse(src + else_, parsers=1) + + +def test_func_with_for_and_comment(differ): + # The first newline is important, leave it. It should not trigger another + # parser split. + src = dedent("""\ + + def func(): + pass + + + for a in [1]: + # COMMENT + a""") + differ.initialize(src) + differ.parse('a\n' + src, copies=1, parsers=3) + + +def test_one_statement_func(differ): + src = dedent("""\ + first + def func(): a + """) + differ.initialize(src + 'second') + differ.parse(src + 'def second():\n a', parsers=1, copies=1) + + +def test_for_on_one_line(differ): + src = dedent("""\ + foo = 1 + for x in foo: pass + + def hi(): + pass + """) + differ.initialize(src) + + src = dedent("""\ + def hi(): + for x in foo: pass + pass + + pass + """) + differ.parse(src, parsers=2) + + src = dedent("""\ + def hi(): + for x in foo: pass + pass + + def nested(): + pass + """) + # The second parser is for parsing the `def nested()` which is an `equal` + # operation in the SequenceMatcher. 
+ differ.parse(src, parsers=1, copies=1) + + +def test_open_parentheses(differ): + func = 'def func():\n a\n' + code = 'isinstance(\n\n' + func + new_code = 'isinstance(\n' + func + differ.initialize(code) + + differ.parse(new_code, parsers=1, expect_error_leaves=True) + + new_code = 'a = 1\n' + new_code + differ.parse(new_code, parsers=2, expect_error_leaves=True) + + func += 'def other_func():\n pass\n' + differ.initialize('isinstance(\n' + func) + # Cannot copy all, because the prefix of the function is once a newline and + # once not. + differ.parse('isinstance()\n' + func, parsers=2, copies=1) + + +def test_open_parentheses_at_end(differ): + code = "a['" + differ.initialize(code) + differ.parse(code, parsers=1, expect_error_leaves=True) + + +def test_backslash(differ): + src = dedent(r""" + a = 1\ + if 1 else 2 + def x(): + pass + """) + differ.initialize(src) + + src = dedent(r""" + def x(): + a = 1\ + if 1 else 2 + def y(): + pass + """) + differ.parse(src, parsers=1) + + src = dedent(r""" + def first(): + if foo \ + and bar \ + or baz: + pass + def second(): + pass + """) + differ.parse(src, parsers=2) + + +def test_full_copy(differ): + code = 'def foo(bar, baz):\n pass\n bar' + differ.initialize(code) + differ.parse(code, copies=1) + + +def test_wrong_whitespace(differ): + code = ''' + hello + ''' + differ.initialize(code) + differ.parse(code + 'bar\n ', parsers=2, expect_error_leaves=True) + + code += """abc(\npass\n """ + differ.parse(code, parsers=2, expect_error_leaves=True) + + +def test_issues_with_error_leaves(differ): + code = dedent(''' + def ints(): + str.. + str + ''') + code2 = dedent(''' + def ints(): + str. 
+ str + ''') + differ.initialize(code) + differ.parse(code2, parsers=1, expect_error_leaves=True) + + +def test_unfinished_nodes(differ): + code = dedent(''' + class a(): + def __init__(self, a): + self.a = a + def p(self): + a(1) + ''') + code2 = dedent(''' + class a(): + def __init__(self, a): + self.a = a + def p(self): + self + a(1) + ''') + differ.initialize(code) + differ.parse(code2, parsers=2, copies=2) + + +def test_nested_if_and_scopes(differ): + code = dedent(''' + class a(): + if 1: + def b(): + 2 + ''') + code2 = code + ' else:\n 3' + differ.initialize(code) + differ.parse(code2, parsers=1, copies=0) + + +def test_word_before_def(differ): + code1 = 'blub def x():\n' + code2 = code1 + ' s' + differ.initialize(code1) + differ.parse(code2, parsers=1, copies=0, expect_error_leaves=True) + + +def test_classes_with_error_leaves(differ): + code1 = dedent(''' + class X(): + def x(self): + blablabla + assert 3 + self. + + class Y(): + pass + ''') + code2 = dedent(''' + class X(): + def x(self): + blablabla + assert 3 + str( + + class Y(): + pass + ''') + + differ.initialize(code1) + differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True) + + +def test_totally_wrong_whitespace(differ): + code1 = ''' + class X(): + raise n + + class Y(): + pass + ''' + code2 = ''' + class X(): + raise n + str( + + class Y(): + pass + ''' + + differ.initialize(code1) + differ.parse(code2, parsers=2, copies=0, expect_error_leaves=True) + + +def test_node_insertion(differ): + code1 = dedent(''' + class X(): + def y(self): + a = 1 + b = 2 + + c = 3 + d = 4 + ''') + code2 = dedent(''' + class X(): + def y(self): + a = 1 + b = 2 + str + + c = 3 + d = 4 + ''') + + differ.initialize(code1) + differ.parse(code2, parsers=1, copies=2) + + +def test_whitespace_at_end(differ): + code = dedent('str\n\n') + + differ.initialize(code) + differ.parse(code + '\n', parsers=1, copies=1) + + +def test_endless_while_loop(differ): + """ + This was a bug in Jedi #878. 
+ """ + code = '#dead' + differ.initialize(code) + module = differ.parse(code, parsers=1) + assert module.end_pos == (1, 5) + + code = '#dead\n' + differ.initialize(code) + module = differ.parse(code + '\n', parsers=1) + assert module.end_pos == (3, 0) + + +def test_in_class_movements(differ): + code1 = dedent("""\ + class PlaybookExecutor: + p + b + def run(self): + 1 + try: + x + except: + pass + """) + code2 = dedent("""\ + class PlaybookExecutor: + b + def run(self): + 1 + try: + x + except: + pass + """) + + differ.initialize(code1) + differ.parse(code2, parsers=1) + + +def test_in_parentheses_newlines(differ): + code1 = dedent(""" + x = str( + True) + + a = 1 + + def foo(): + pass + + b = 2""") + + code2 = dedent(""" + x = str(True) + + a = 1 + + def foo(): + pass + + b = 2""") + + differ.initialize(code1) + differ.parse(code2, parsers=1, copies=1) + + +def test_indentation_issue(differ): + code1 = dedent(""" + import module + """) + + code2 = dedent(""" + class L1: + class L2: + class L3: + def f(): pass + def f(): pass + def f(): pass + def f(): pass + """) + + differ.initialize(code1) + differ.parse(code2, parsers=2) + + +def test_endmarker_newline(differ): + code1 = dedent('''\ + docu = None + # some comment + result = codet + incomplete_dctassign = { + "module" + + if "a": + x = 3 # asdf + ''') + + code2 = code1.replace('codet', 'coded') + + differ.initialize(code1) + differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True) + + +def test_newlines_at_end(differ): + differ.initialize('a\n\n') + differ.parse('a\n', copies=1) + + +def test_end_newline_with_decorator(differ): + code = dedent('''\ + @staticmethod + def spam(): + import json + json.l''') + + differ.initialize(code) + module = differ.parse(code + '\n', copies=1, parsers=1) + decorated, endmarker = module.children + assert decorated.type == 'decorated' + decorator, func = decorated.children + suite = func.children[-1] + assert suite.type == 'suite' + newline, first_stmt, second_stmt = 
suite.children + assert first_stmt.get_code() == ' import json\n' + assert second_stmt.get_code() == ' json.l\n' + + +def test_invalid_to_valid_nodes(differ): + code1 = dedent('''\ + def a(): + foo = 3 + def b(): + la = 3 + else: + la + return + foo + base + ''') + code2 = dedent('''\ + def a(): + foo = 3 + def b(): + la = 3 + if foo: + latte = 3 + else: + la + return + foo + base + ''') + + differ.initialize(code1) + differ.parse(code2, parsers=1, copies=3) + + +def test_if_removal_and_reappearence(differ): + code1 = dedent('''\ + la = 3 + if foo: + latte = 3 + else: + la + pass + ''') + + code2 = dedent('''\ + la = 3 + latte = 3 + else: + la + pass + ''') + + code3 = dedent('''\ + la = 3 + if foo: + latte = 3 + else: + la + ''') + differ.initialize(code1) + differ.parse(code2, parsers=3, copies=2, expect_error_leaves=True) + differ.parse(code1, parsers=1, copies=1) + differ.parse(code3, parsers=1, copies=1) + + +def test_add_error_indentation(differ): + code = 'if x:\n 1\n' + differ.initialize(code) + differ.parse(code + ' 2\n', parsers=1, copies=0, expect_error_leaves=True) + + +def test_differing_docstrings(differ): + code1 = dedent('''\ + def foobar(x, y): + 1 + return x + + def bazbiz(): + foobar() + lala + ''') + + code2 = dedent('''\ + def foobar(x, y): + 2 + return x + y + + def bazbiz(): + z = foobar() + lala + ''') + + differ.initialize(code1) + differ.parse(code2, parsers=2, copies=1) + differ.parse(code1, parsers=2, copies=1) + + +def test_one_call_in_function_change(differ): + code1 = dedent('''\ + def f(self): + mro = [self] + for a in something: + yield a + + def g(self): + return C( + a=str, + b=self, + ) + ''') + + code2 = dedent('''\ + def f(self): + mro = [self] + + def g(self): + return C( + a=str, + t + b=self, + ) + ''') + + differ.initialize(code1) + differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True) + differ.parse(code1, parsers=2, copies=1) + + +def test_function_deletion(differ): + code1 = dedent('''\ + class C(list): + 
def f(self): + def iterate(): + for x in b: + break + + return list(iterate()) + ''') + + code2 = dedent('''\ + class C(): + def f(self): + for x in b: + break + + return list(iterate()) + ''') + + differ.initialize(code1) + differ.parse(code2, parsers=1, copies=0, expect_error_leaves=True) + differ.parse(code1, parsers=1, copies=0) + + +def test_docstring_removal(differ): + code1 = dedent('''\ + class E(Exception): + """ + 1 + 2 + 3 + """ + + class S(object): + @property + def f(self): + return cmd + def __repr__(self): + return cmd2 + ''') + + code2 = dedent('''\ + class E(Exception): + """ + 1 + 3 + """ + + class S(object): + @property + def f(self): + return cmd + return cmd2 + ''') + + differ.initialize(code1) + differ.parse(code2, parsers=1, copies=2) + differ.parse(code1, parsers=3, copies=1) + + +def test_paren_in_strange_position(differ): + code1 = dedent('''\ + class C: + """ ha """ + def __init__(self, message): + self.message = message + ''') + + code2 = dedent('''\ + class C: + """ ha """ + ) + def __init__(self, message): + self.message = message + ''') + + differ.initialize(code1) + differ.parse(code2, parsers=1, copies=2, expect_error_leaves=True) + differ.parse(code1, parsers=0, copies=2) + + +def insert_line_into_code(code, index, line): + lines = split_lines(code, keepends=True) + lines.insert(index, line) + return ''.join(lines) + + +def test_paren_before_docstring(differ): + code1 = dedent('''\ + # comment + """ + The + """ + from parso import tree + from parso import python + ''') + + code2 = insert_line_into_code(code1, 1, ' ' * 16 + 'raise InternalParseError(\n') + + differ.initialize(code1) + differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True) + differ.parse(code1, parsers=2, copies=1) + + +def test_parentheses_before_method(differ): + code1 = dedent('''\ + class A: + def a(self): + pass + + class B: + def b(self): + if 1: + pass + ''') + + code2 = dedent('''\ + class A: + def a(self): + pass + Exception.__init__(self, "x" % 
+ + def b(self): + if 1: + pass + ''') + + differ.initialize(code1) + differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True) + differ.parse(code1, parsers=2, copies=1) + + +def test_indentation_issues(differ): + code1 = dedent('''\ + class C: + def f(): + 1 + if 2: + return 3 + + def g(): + to_be_removed + pass + ''') + + code2 = dedent('''\ + class C: + def f(): + 1 + ``something``, very ``weird``). + if 2: + return 3 + + def g(): + to_be_removed + pass + ''') + + code3 = dedent('''\ + class C: + def f(): + 1 + if 2: + return 3 + + def g(): + pass + ''') + + differ.initialize(code1) + differ.parse(code2, parsers=3, copies=1, expect_error_leaves=True) + differ.parse(code1, copies=1, parsers=2) + differ.parse(code3, parsers=2, copies=1) + differ.parse(code1, parsers=2, copies=1) + + +def test_error_dedent_issues(differ): + code1 = dedent('''\ + while True: + try: + 1 + except KeyError: + if 2: + 3 + except IndexError: + 4 + + 5 + ''') + + code2 = dedent('''\ + while True: + try: + except KeyError: + 1 + except KeyError: + if 2: + 3 + except IndexError: + 4 + + something_inserted + 5 + ''') + + differ.initialize(code1) + differ.parse(code2, parsers=3, copies=0, expect_error_leaves=True) + differ.parse(code1, parsers=1, copies=0) + + +def test_random_text_insertion(differ): + code1 = dedent('''\ +class C: + def f(): + return node + + def g(): + try: + 1 + except KeyError: + 2 + ''') + + code2 = dedent('''\ +class C: + def f(): + return node +Some'random text: yeah + for push in plan.dfa_pushes: + + def g(): + try: + 1 + except KeyError: + 2 + ''') + + differ.initialize(code1) + differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True) + differ.parse(code1, parsers=2, copies=1) + + +def test_many_nested_ifs(differ): + code1 = dedent('''\ + class C: + def f(self): + def iterate(): + if 1: + yield t + else: + yield + return + + def g(): + 3 + ''') + + code2 = dedent('''\ + def f(self): + def iterate(): + if 1: + yield t + hahahaha + if 2: + else: + 
yield + return + + def g(): + 3 + ''') + + differ.initialize(code1) + differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True) + differ.parse(code1, parsers=1, copies=1) + + +@pytest.mark.parametrize('prefix', ['', 'async ']) +def test_with_and_funcdef_in_call(differ, prefix): + code1 = prefix + dedent('''\ + with x: + la = C( + a=1, + b=2, + c=3, + ) + ''') + + code2 = insert_line_into_code(code1, 3, 'def y(self, args):\n') + + differ.initialize(code1) + differ.parse(code2, parsers=1, expect_error_leaves=True) + differ.parse(code1, parsers=1) + + +def test_wrong_backslash(differ): + code1 = dedent('''\ + def y(): + 1 + for x in y: + continue + ''') + + code2 = insert_line_into_code(code1, 3, '\\.whl$\n') + + differ.initialize(code1) + differ.parse(code2, parsers=3, copies=1, expect_error_leaves=True) + differ.parse(code1, parsers=1, copies=1) + + +def test_random_unicode_characters(differ): + """ + Those issues were all found with the fuzzer. + """ + differ.initialize('') + differ.parse('\x1dĔBϞɛˁşʑ˳˻ȣſéÎ\x90̕ȟòwʘ\x1dĔBϞɛˁşʑ˳˻ȣſéÎ', parsers=1, + expect_error_leaves=True) + differ.parse('\r\r', parsers=1) + differ.parse("˟Ę\x05À\r rúƣ@\x8a\x15r()\n", parsers=1, expect_error_leaves=True) + differ.parse('a\ntaǁ\rGĒōns__\n\nb', parsers=1) + s = ' if not (self, "_fi\x02\x0e\x08\n\nle"):' + differ.parse(s, parsers=1, expect_error_leaves=True) + differ.parse('') + differ.parse(s + '\n', parsers=1, expect_error_leaves=True) + differ.parse(' result = (\r\f\x17\t\x11res)', parsers=1, expect_error_leaves=True) + differ.parse('') + differ.parse(' a( # xx\ndef', parsers=1, expect_error_leaves=True) + + +def test_dedent_end_positions(differ): + code1 = dedent('''\ + if 1: + if b: + 2 + c = { + 5} + ''') + code2 = dedent('''\ + if 1: + if ⌟ഒᜈྡྷṭb: + 2 + 'l': ''} + c = { + 5} + ''') + differ.initialize(code1) + differ.parse(code2, parsers=1, expect_error_leaves=True) + differ.parse(code1, parsers=1) + + +def test_special_no_newline_ending(differ): + code1 = dedent('''\ 
+ 1 + ''') + code2 = dedent('''\ + 1 + is ''') + differ.initialize(code1) + differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True) + differ.parse(code1, copies=1, parsers=0) + + +def test_random_character_insertion(differ): + code1 = dedent('''\ + def create(self): + 1 + if self.path is not None: + return + # 3 + # 4 + ''') + code2 = dedent('''\ + def create(self): + 1 + if 2: + x return + # 3 + # 4 + ''') + differ.initialize(code1) + differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True) + differ.parse(code1, copies=1, parsers=1) + + +def test_import_opening_bracket(differ): + code1 = dedent('''\ + 1 + 2 + from bubu import (X, + ''') + code2 = dedent('''\ + 11 + 2 + from bubu import (X, + ''') + differ.initialize(code1) + differ.parse(code2, copies=1, parsers=2, expect_error_leaves=True) + differ.parse(code1, copies=1, parsers=2, expect_error_leaves=True) + + +def test_opening_bracket_at_end(differ): + code1 = dedent('''\ + class C: + 1 + [ + ''') + code2 = dedent('''\ + 3 + class C: + 1 + [ + ''') + differ.initialize(code1) + differ.parse(code2, copies=1, parsers=2, expect_error_leaves=True) + differ.parse(code1, copies=1, parsers=1, expect_error_leaves=True) + + +def test_all_sorts_of_indentation(differ): + code1 = dedent('''\ + class C: + 1 + def f(): + 'same' + + if foo: + a = b + end + ''') + code2 = dedent('''\ + class C: + 1 + def f(yield await %|( + 'same' + + \x02\x06\x0f\x1c\x11 + if foo: + a = b + + end + ''') + differ.initialize(code1) + differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True) + differ.parse(code1, copies=1, parsers=1, expect_error_leaves=True) + + code3 = dedent('''\ + if 1: + a + b + c + d + \x00 + ''') + differ.parse(code3, parsers=1, expect_error_leaves=True) + differ.parse('') + + +def test_dont_copy_dedents_in_beginning(differ): + code1 = dedent('''\ + a + 4 + ''') + code2 = dedent('''\ + 1 + 2 + 3 + 4 + ''') + differ.initialize(code1) + differ.parse(code2, copies=1, parsers=1, 
expect_error_leaves=True) + differ.parse(code1, parsers=1, copies=1) + + +def test_dont_copy_error_leaves(differ): + code1 = dedent('''\ + def f(n): + x + if 2: + 3 + ''') + code2 = dedent('''\ + def f(n): + def if 1: + indent + x + if 2: + 3 + ''') + differ.initialize(code1) + differ.parse(code2, parsers=1, expect_error_leaves=True) + differ.parse(code1, parsers=1) + + +def test_error_dedent_in_between(differ): + code1 = dedent('''\ + class C: + def f(): + a + if something: + x + z + ''') + code2 = dedent('''\ + class C: + def f(): + a + dedent + if other_thing: + b + if something: + x + z + ''') + differ.initialize(code1) + differ.parse(code2, copies=1, parsers=2, expect_error_leaves=True) + differ.parse(code1, copies=1, parsers=2) + + +def test_some_other_indentation_issues(differ): + code1 = dedent('''\ + class C: + x + def f(): + "" + copied + a + ''') + code2 = dedent('''\ + try: + de + a + b + c + d + def f(): + "" + copied + a + ''') + differ.initialize(code1) + differ.parse(code2, copies=0, parsers=1, expect_error_leaves=True) + differ.parse(code1, copies=1, parsers=1) + + +def test_open_bracket_case1(differ): + code1 = dedent('''\ + class C: + 1 + 2 # ha + ''') + code2 = insert_line_into_code(code1, 2, ' [str\n') + code3 = insert_line_into_code(code2, 4, ' str\n') + differ.initialize(code1) + differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True) + differ.parse(code3, copies=1, parsers=1, expect_error_leaves=True) + differ.parse(code1, copies=1, parsers=1) + + +def test_open_bracket_case2(differ): + code1 = dedent('''\ + class C: + def f(self): + ( + b + c + + def g(self): + d + ''') + code2 = dedent('''\ + class C: + def f(self): + ( + b + c + self. 
+ + def g(self): + d + ''') + differ.initialize(code1) + differ.parse(code2, copies=0, parsers=1, expect_error_leaves=True) + differ.parse(code1, copies=0, parsers=1, expect_error_leaves=True) + + +def test_some_weird_removals(differ): + code1 = dedent('''\ + class C: + 1 + ''') + code2 = dedent('''\ + class C: + 1 + @property + A + return + # x + omega + ''') + code3 = dedent('''\ + class C: + 1 + ; + omega + ''') + differ.initialize(code1) + differ.parse(code2, copies=1, parsers=1, expect_error_leaves=True) + differ.parse(code3, copies=1, parsers=3, expect_error_leaves=True) + differ.parse(code1, copies=1) + + +def test_async_copy(differ): + code1 = dedent('''\ + async def main(): + x = 3 + print( + ''') + code2 = dedent('''\ + async def main(): + x = 3 + print() + ''') + differ.initialize(code1) + differ.parse(code2, copies=1, parsers=1) + differ.parse(code1, copies=1, parsers=1, expect_error_leaves=True) + + +def test_parent_on_decorator(differ): + code1 = dedent('''\ + class AClass: + @decorator() + def b_test(self): + print("Hello") + print("world") + + def a_test(self): + pass''') + code2 = dedent('''\ + class AClass: + @decorator() + def b_test(self): + print("Hello") + print("world") + + def a_test(self): + pass''') + differ.initialize(code1) + module_node = differ.parse(code2, parsers=1) + cls = module_node.children[0] + cls_suite = cls.children[-1] + assert len(cls_suite.children) == 3 + + +def test_wrong_indent_in_def(differ): + code1 = dedent('''\ + def x(): + a + b + ''') + + code2 = dedent('''\ + def x(): + // + b + c + ''') + differ.initialize(code1) + differ.parse(code2, parsers=1, expect_error_leaves=True) + differ.parse(code1, parsers=1) + + +def test_backslash_issue(differ): + code1 = dedent(''' + pre = ( + '') + after = 'instead' + ''') + code2 = dedent(''' + pre = ( + '') + \\if + ''') # noqa + differ.initialize(code1) + differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True) + differ.parse(code1, parsers=1, copies=1) + + +def 
test_paren_with_indentation(differ): + code1 = dedent(''' + class C: + def f(self, fullname, path=None): + x + + def load_module(self, fullname): + a + for prefix in self.search_path: + try: + b + except ImportError: + c + else: + raise + def x(): + pass + ''') + code2 = dedent(''' + class C: + def f(self, fullname, path=None): + x + + ( + a + for prefix in self.search_path: + try: + b + except ImportError: + c + else: + raise + ''') + differ.initialize(code1) + differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True) + differ.parse(code1, parsers=3, copies=1) + + +def test_error_dedent_in_function(differ): + code1 = dedent('''\ + def x(): + a + b + c + d + ''') + code2 = dedent('''\ + def x(): + a + b + c + d + e + ''') + differ.initialize(code1) + differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True) + + +def test_with_formfeed(differ): + code1 = dedent('''\ + @bla + async def foo(): + 1 + yield from [] + return + return '' + ''') + code2 = dedent('''\ + @bla + async def foo(): + 1 + \x0cimport + return + return '' + ''') # noqa + differ.initialize(code1) + differ.parse(code2, parsers=ANY, copies=ANY, expect_error_leaves=True) + + +def test_repeating_invalid_indent(differ): + code1 = dedent('''\ + def foo(): + return + + @bla + a + def foo(): + a + b + c + ''') + code2 = dedent('''\ + def foo(): + return + + @bla + a + b + c + ''') + differ.initialize(code1) + differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True) + + +def test_another_random_indent(differ): + code1 = dedent('''\ + def foo(): + a + b + c + return + def foo(): + d + ''') + code2 = dedent('''\ + def foo(): + a + c + return + def foo(): + d + ''') + differ.initialize(code1) + differ.parse(code2, parsers=1, copies=3) + + +def test_invalid_function(differ): + code1 = dedent('''\ + a + def foo(): + def foo(): + b + ''') + code2 = dedent('''\ + a + def foo(): + def foo(): + b + ''') + differ.initialize(code1) + differ.parse(code2, parsers=1, copies=1, 
expect_error_leaves=True) + + +def test_async_func2(differ): + code1 = dedent('''\ + async def foo(): + return '' + @bla + async def foo(): + x + ''') + code2 = dedent('''\ + async def foo(): + return '' + + { + @bla + async def foo(): + x + y + ''') + differ.initialize(code1) + differ.parse(code2, parsers=ANY, copies=ANY, expect_error_leaves=True) + + +def test_weird_ending(differ): + code1 = dedent('''\ + def foo(): + a + return + ''') + code2 = dedent('''\ + def foo(): + a + nonlocal xF""" + y"""''') + differ.initialize(code1) + differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True) + + +def test_nested_class(differ): + code1 = dedent('''\ +def c(): + a = 3 + class X: + b + ''') + code2 = dedent('''\ +def c(): + a = 3 + class X: + elif + ''') + differ.initialize(code1) + differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True) + + +def test_class_with_paren_breaker(differ): + code1 = dedent('''\ +class Grammar: + x + def parse(): + y + parser( + ) + z + ''') + code2 = dedent('''\ +class Grammar: + x + def parse(): + y + parser( + finally ; + ) + z + ''') + differ.initialize(code1) + differ.parse(code2, parsers=3, copies=1, expect_error_leaves=True) + + +def test_byte_order_mark(differ): + code2 = dedent('''\ + + x + \ufeff + else : + ''') + differ.initialize('\n') + differ.parse(code2, parsers=2, expect_error_leaves=True) + + code3 = dedent('''\ + \ufeff + if: + + x + ''') + differ.initialize('\n') + differ.parse(code3, parsers=2, expect_error_leaves=True) + + +def test_byte_order_mark2(differ): + code = '\ufeff# foo' + differ.initialize(code) + differ.parse(code + 'x', parsers=ANY) + + +def test_byte_order_mark3(differ): + code1 = "\ufeff#\ny\n" + code2 = 'x\n\ufeff#\n\ufeff#\ny\n' + differ.initialize(code1) + differ.parse(code2, expect_error_leaves=True, parsers=ANY, copies=ANY) + differ.parse(code1, parsers=1) + + +def test_backslash_insertion(differ): + code1 = dedent(''' + def f(): + x + def g(): + base = "" \\ + "" + return + ''') + 
code2 = dedent(''' + def f(): + x + def g(): + base = "" \\ + def h(): + "" + return + ''') + + differ.initialize(code1) + differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True) + differ.parse(code1, parsers=2, copies=1) + + +def test_fstring_with_error_leaf(differ): + code1 = dedent("""\ + def f(): + x + def g(): + y + """) + code2 = dedent("""\ + def f(): + x + F''' + def g(): + y + {a + \x01 + """) + + differ.initialize(code1) + differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True) + + +def test_yet_another_backslash(differ): + code1 = dedent('''\ + def f(): + x + def g(): + y + base = "" \\ + "" % to + return + ''') + code2 = dedent('''\ + def f(): + x + def g(): + y + base = "" \\ + \x0f + return + ''') + + differ.initialize(code1) + differ.parse(code2, parsers=ANY, copies=ANY, expect_error_leaves=True) + differ.parse(code1, parsers=ANY, copies=ANY) + + +def test_backslash_before_def(differ): + code1 = dedent('''\ + def f(): + x + + def g(): + y + z + ''') + code2 = dedent('''\ + def f(): + x + >\\ + def g(): + y + x + z + ''') + + differ.initialize(code1) + differ.parse(code2, parsers=3, copies=1, expect_error_leaves=True) + + +def test_backslash_with_imports(differ): + code1 = dedent('''\ + from x import y, \\ + ''') + code2 = dedent('''\ + from x import y, \\ + z + ''') + + differ.initialize(code1) + differ.parse(code2, parsers=1) + differ.parse(code1, parsers=1) + + +def test_one_line_function_error_recovery(differ): + code1 = dedent('''\ + class X: + x + def y(): word """ + # a + # b + c(self) + ''') + code2 = dedent('''\ + class X: + x + def y(): word """ + # a + # b + c(\x01+self) + ''') + + differ.initialize(code1) + differ.parse(code2, parsers=1, copies=1, expect_error_leaves=True) + + +def test_one_line_property_error_recovery(differ): + code1 = dedent('''\ + class X: + x + @property + def encoding(self): True - + return 1 + ''') + code2 = dedent('''\ + class X: + x + @property + def encoding(self): True - + return 1 + ''') + 
+ differ.initialize(code1) + differ.parse(code2, parsers=2, copies=1, expect_error_leaves=True) diff --git a/bundle/jedi-vim/pythonx/parso/test/test_dump_tree.py b/bundle/jedi-vim/pythonx/parso/test/test_dump_tree.py new file mode 100644 index 000000000..d2d7259f7 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/test_dump_tree.py @@ -0,0 +1,182 @@ +from textwrap import dedent + +import pytest + +from parso import parse +# Using star import for easier eval testing below. +from parso.python.tree import * # noqa: F403 +from parso.tree import * # noqa: F403 +from parso.tree import ErrorLeaf, TypedLeaf + + +@pytest.mark.parametrize( + 'indent,expected_dump', [ + (None, "Module([" + "Lambda([" + "Keyword('lambda', (1, 0)), " + "Param([" + "Name('x', (1, 7), prefix=' '), " + "Operator(',', (1, 8)), " + "]), " + "Param([" + "Name('y', (1, 10), prefix=' '), " + "]), " + "Operator(':', (1, 11)), " + "PythonNode('arith_expr', [" + "Name('x', (1, 13), prefix=' '), " + "Operator('+', (1, 15), prefix=' '), " + "Name('y', (1, 17), prefix=' '), " + "]), " + "]), " + "EndMarker('', (1, 18)), " + "])"), + (0, dedent('''\ + Module([ + Lambda([ + Keyword('lambda', (1, 0)), + Param([ + Name('x', (1, 7), prefix=' '), + Operator(',', (1, 8)), + ]), + Param([ + Name('y', (1, 10), prefix=' '), + ]), + Operator(':', (1, 11)), + PythonNode('arith_expr', [ + Name('x', (1, 13), prefix=' '), + Operator('+', (1, 15), prefix=' '), + Name('y', (1, 17), prefix=' '), + ]), + ]), + EndMarker('', (1, 18)), + ])''')), + (4, dedent('''\ + Module([ + Lambda([ + Keyword('lambda', (1, 0)), + Param([ + Name('x', (1, 7), prefix=' '), + Operator(',', (1, 8)), + ]), + Param([ + Name('y', (1, 10), prefix=' '), + ]), + Operator(':', (1, 11)), + PythonNode('arith_expr', [ + Name('x', (1, 13), prefix=' '), + Operator('+', (1, 15), prefix=' '), + Name('y', (1, 17), prefix=' '), + ]), + ]), + EndMarker('', (1, 18)), + ])''')), + ('\t', dedent('''\ + Module([ + \tLambda([ + \t\tKeyword('lambda', (1, 0)), + 
\t\tParam([ + \t\t\tName('x', (1, 7), prefix=' '), + \t\t\tOperator(',', (1, 8)), + \t\t]), + \t\tParam([ + \t\t\tName('y', (1, 10), prefix=' '), + \t\t]), + \t\tOperator(':', (1, 11)), + \t\tPythonNode('arith_expr', [ + \t\t\tName('x', (1, 13), prefix=' '), + \t\t\tOperator('+', (1, 15), prefix=' '), + \t\t\tName('y', (1, 17), prefix=' '), + \t\t]), + \t]), + \tEndMarker('', (1, 18)), + ])''')), + ] +) +def test_dump_parser_tree(indent, expected_dump): + code = "lambda x, y: x + y" + module = parse(code) + assert module.dump(indent=indent) == expected_dump + + # Check that dumped tree can be eval'd to recover the parser tree and original code. + recovered_code = eval(expected_dump).get_code() + assert recovered_code == code + + +@pytest.mark.parametrize( + 'node,expected_dump,expected_code', [ + ( # Dump intermediate node (not top level module) + parse("def foo(x, y): return x + y").children[0], dedent('''\ + Function([ + Keyword('def', (1, 0)), + Name('foo', (1, 4), prefix=' '), + PythonNode('parameters', [ + Operator('(', (1, 7)), + Param([ + Name('x', (1, 8)), + Operator(',', (1, 9)), + ]), + Param([ + Name('y', (1, 11), prefix=' '), + ]), + Operator(')', (1, 12)), + ]), + Operator(':', (1, 13)), + ReturnStmt([ + Keyword('return', (1, 15), prefix=' '), + PythonNode('arith_expr', [ + Name('x', (1, 22), prefix=' '), + Operator('+', (1, 24), prefix=' '), + Name('y', (1, 26), prefix=' '), + ]), + ]), + ])'''), + "def foo(x, y): return x + y", + ), + ( # Dump leaf + parse("def foo(x, y): return x + y").children[0].children[0], + "Keyword('def', (1, 0))", + 'def', + ), + ( # Dump ErrorLeaf + ErrorLeaf('error_type', 'error_code', (1, 1), prefix=' '), + "ErrorLeaf('error_type', 'error_code', (1, 1), prefix=' ')", + ' error_code', + ), + ( # Dump TypedLeaf + TypedLeaf('type', 'value', (1, 1)), + "TypedLeaf('type', 'value', (1, 1))", + 'value', + ), + ] +) +def test_dump_parser_tree_not_top_level_module(node, expected_dump, expected_code): + dump_result = node.dump() + 
assert dump_result == expected_dump + + # Check that dumped tree can be eval'd to recover the parser tree and original code. + recovered_code = eval(dump_result).get_code() + assert recovered_code == expected_code + + +def test_dump_parser_tree_invalid_args(): + module = parse("lambda x, y: x + y") + + with pytest.raises(TypeError): + module.dump(indent=1.1) + + +def test_eval_dump_recovers_parent(): + module = parse("lambda x, y: x + y") + module2 = eval(module.dump()) + assert module2.parent is None + lambda_node = module2.children[0] + assert lambda_node.parent is module2 + assert module2.children[1].parent is module2 + assert lambda_node.children[0].parent is lambda_node + param_node = lambda_node.children[1] + assert param_node.parent is lambda_node + assert param_node.children[0].parent is param_node + assert param_node.children[1].parent is param_node + arith_expr_node = lambda_node.children[-1] + assert arith_expr_node.parent is lambda_node + assert arith_expr_node.children[0].parent is arith_expr_node diff --git a/bundle/jedi-vim/pythonx/parso/test/test_error_recovery.py b/bundle/jedi-vim/pythonx/parso/test/test_error_recovery.py new file mode 100644 index 000000000..87efd4784 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/test_error_recovery.py @@ -0,0 +1,149 @@ +from textwrap import dedent + +from parso import parse, load_grammar + + +def test_with_stmt(): + module = parse('with x: f.\na') + assert module.children[0].type == 'with_stmt' + w, with_item, colon, f = module.children[0].children + assert f.type == 'error_node' + assert f.get_code(include_prefix=False) == 'f.' 
+ + assert module.children[2].type == 'name' + + +def test_one_line_function(each_version): + module = parse('def x(): f.', version=each_version) + assert module.children[0].type == 'funcdef' + def_, name, parameters, colon, f = module.children[0].children + assert f.type == 'error_node' + + module = parse('def x(a:', version=each_version) + func = module.children[0] + assert func.type == 'error_node' + if each_version.startswith('2'): + assert func.children[-1].value == 'a' + else: + assert func.children[-1] == ':' + + +def test_if_else(): + module = parse('if x:\n f.\nelse:\n g(') + if_stmt = module.children[0] + if_, test, colon, suite1, else_, colon, suite2 = if_stmt.children + f = suite1.children[1] + assert f.type == 'error_node' + assert f.children[0].value == 'f' + assert f.children[1].value == '.' + g = suite2.children[1] + assert g.children[0].value == 'g' + assert g.children[1].value == '(' + + +def test_if_stmt(): + module = parse('if x: f.\nelse: g(') + if_stmt = module.children[0] + assert if_stmt.type == 'if_stmt' + if_, test, colon, f = if_stmt.children + assert f.type == 'error_node' + assert f.children[0].value == 'f' + assert f.children[1].value == '.' + + assert module.children[1].type == 'newline' + assert module.children[1].value == '\n' + assert module.children[2].type == 'error_leaf' + assert module.children[2].value == 'else' + assert module.children[3].type == 'error_leaf' + assert module.children[3].value == ':' + + in_else_stmt = module.children[4] + assert in_else_stmt.type == 'error_node' + assert in_else_stmt.children[0].value == 'g' + assert in_else_stmt.children[1].value == '(' + + +def test_invalid_token(): + module = parse('a + ? + b') + error_node, q, plus_b, endmarker = module.children + assert error_node.get_code() == 'a +' + assert q.value == '?' 
+ assert q.type == 'error_leaf' + assert plus_b.type == 'factor' + assert plus_b.get_code() == ' + b' + + +def test_invalid_token_in_fstr(): + module = load_grammar(version='3.9').parse('f"{a + ? + b}"') + error_node, q, plus_b, error1, error2, endmarker = module.children + assert error_node.get_code() == 'f"{a +' + assert q.value == '?' + assert q.type == 'error_leaf' + assert plus_b.type == 'error_node' + assert plus_b.get_code() == ' + b' + assert error1.value == '}' + assert error1.type == 'error_leaf' + assert error2.value == '"' + assert error2.type == 'error_leaf' + + +def test_dedent_issues1(): + code = dedent('''\ + class C: + @property + f + g + end + ''') + module = load_grammar(version='3.8').parse(code) + klass, endmarker = module.children + suite = klass.children[-1] + assert suite.children[2].type == 'error_leaf' + assert suite.children[3].get_code(include_prefix=False) == 'f\n' + assert suite.children[5].get_code(include_prefix=False) == 'g\n' + assert suite.type == 'suite' + + +def test_dedent_issues2(): + code = dedent('''\ + class C: + @property + if 1: + g + else: + h + end + ''') + module = load_grammar(version='3.8').parse(code) + klass, endmarker = module.children + suite = klass.children[-1] + assert suite.children[2].type == 'error_leaf' + if_ = suite.children[3] + assert if_.children[0] == 'if' + assert if_.children[3].type == 'suite' + assert if_.children[3].get_code() == '\n g\n' + assert if_.children[4] == 'else' + assert if_.children[6].type == 'suite' + assert if_.children[6].get_code() == '\n h\n' + + assert suite.children[4].get_code(include_prefix=False) == 'end\n' + assert suite.type == 'suite' + + +def test_dedent_issues3(): + code = dedent('''\ + class C: + f + g + ''') + module = load_grammar(version='3.8').parse(code) + klass, endmarker = module.children + suite = klass.children[-1] + assert len(suite.children) == 4 + assert suite.children[1].get_code() == ' f\n' + assert suite.children[1].type == 'simple_stmt' + assert 
suite.children[2].get_code() == '' + assert suite.children[2].type == 'error_leaf' + assert suite.children[2].token_type == 'ERROR_DEDENT' + assert suite.children[3].get_code() == ' g\n' + assert suite.children[3].type == 'simple_stmt' diff --git a/bundle/jedi-vim/pythonx/parso/test/test_file_python_errors.py b/bundle/jedi-vim/pythonx/parso/test/test_file_python_errors.py new file mode 100644 index 000000000..7083dfeb4 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/test_file_python_errors.py @@ -0,0 +1,23 @@ +import os + +import parso + + +def get_python_files(path): + for dir_path, dir_names, file_names in os.walk(path): + for file_name in file_names: + if file_name.endswith('.py'): + yield os.path.join(dir_path, file_name) + + +def test_on_itself(each_version): + """ + There are obviously no syntax erros in the Python code of parso. However + parso should output the same for all versions. + """ + grammar = parso.load_grammar(version=each_version) + path = os.path.dirname(os.path.dirname(__file__)) + '/parso' + for file in get_python_files(path): + tree = grammar.parse(path=file) + errors = list(grammar.iter_errors(tree)) + assert not errors diff --git a/bundle/jedi-vim/pythonx/parso/test/test_fstring.py b/bundle/jedi-vim/pythonx/parso/test/test_fstring.py new file mode 100644 index 000000000..c81d027a1 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/test_fstring.py @@ -0,0 +1,164 @@ +import pytest +from textwrap import dedent + +from parso import load_grammar, ParserSyntaxError +from parso.python.tokenize import tokenize + + +@pytest.fixture +def grammar(): + return load_grammar(version='3.8') + + +@pytest.mark.parametrize( + 'code', [ + # simple cases + 'f"{1}"', + 'f"""{1}"""', + 'f"{foo} {bar}"', + + # empty string + 'f""', + 'f""""""', + + # empty format specifier is okay + 'f"{1:}"', + + # use of conversion options + 'f"{1!a}"', + 'f"{1!a:1}"', + + # format specifiers + 'f"{1:1}"', + 'f"{1:1.{32}}"', + 'f"{1::>4}"', + 'f"{x:{y}}"', + 
'f"{x:{y:}}"', + 'f"{x:{y:1}}"', + + # Escapes + 'f"{{}}"', + 'f"{{{1}}}"', + 'f"{{{1}"', + 'f"1{{2{{3"', + 'f"}}"', + + # New Python 3.8 syntax f'{a=}' + 'f"{a=}"', + 'f"{a()=}"', + + # multiline f-string + 'f"""abc\ndef"""', + 'f"""abc{\n123}def"""', + + # a line continuation inside of an fstring_string + 'f"abc\\\ndef"', + 'f"\\\n{123}\\\n"', + + # a line continuation inside of an fstring_expr + 'f"{\\\n123}"', + + # a line continuation inside of an format spec + 'f"{123:.2\\\nf}"', + + # some unparenthesized syntactic structures + 'f"{*x,}"', + 'f"{*x, *y}"', + 'f"{x, *y}"', + 'f"{*x, y}"', + 'f"{x for x in [1]}"', + + # named unicode characters + 'f"\\N{BULLET}"', + 'f"\\N{FLEUR-DE-LIS}"', + 'f"\\N{NO ENTRY}"', + 'f"Combo {expr} and \\N{NO ENTRY}"', + 'f"\\N{NO ENTRY} and {expr}"', + 'f"\\N{no entry}"', + 'f"\\N{SOYOMBO LETTER -A}"', + 'f"\\N{DOMINO TILE HORIZONTAL-00-00}"', + 'f"""\\N{NO ENTRY}"""', + ] +) +def test_valid(code, grammar): + module = grammar.parse(code, error_recovery=False) + fstring = module.children[0] + assert fstring.type == 'fstring' + assert fstring.get_code() == code + + +@pytest.mark.parametrize( + 'code', [ + # an f-string can't contain unmatched curly braces + 'f"}"', + 'f"{"', + 'f"""}"""', + 'f"""{"""', + + # invalid conversion characters + 'f"{1!{a}}"', + 'f"{1=!{a}}"', + 'f"{!{a}}"', + + # The curly braces must contain an expression + 'f"{}"', + 'f"{:}"', + 'f"{:}}}"', + 'f"{:1}"', + 'f"{!:}"', + 'f"{!}"', + 'f"{!a}"', + + # invalid (empty) format specifiers + 'f"{1:{}}"', + 'f"{1:{:}}"', + + # a newline without a line continuation inside a single-line string + 'f"abc\ndef"', + + # various named unicode escapes that aren't name-shaped + 'f"\\N{ BULLET }"', + 'f"\\N{NO ENTRY}"', + 'f"""\\N{NO\nENTRY}"""', + ] +) +def test_invalid(code, grammar): + with pytest.raises(ParserSyntaxError): + grammar.parse(code, error_recovery=False) + + # It should work with error recovery. 
+ grammar.parse(code, error_recovery=True) + + +@pytest.mark.parametrize( + ('code', 'positions'), [ + # 2 times 2, 5 because python expr and endmarker. + ('f"}{"', [(1, 0), (1, 2), (1, 3), (1, 4), (1, 5)]), + ('f" :{ 1 : } "', [(1, 0), (1, 2), (1, 4), (1, 6), (1, 8), (1, 9), + (1, 10), (1, 11), (1, 12), (1, 13)]), + ('f"""\n {\nfoo\n }"""', [(1, 0), (1, 4), (2, 1), (3, 0), (4, 1), + (4, 2), (4, 5)]), + ('f"\\N{NO ENTRY} and {expr}"', [(1, 0), (1, 2), (1, 19), (1, 20), + (1, 24), (1, 25), (1, 26)]), + ] +) +def test_tokenize_start_pos(code, positions): + tokens = list(tokenize(code, version_info=(3, 6))) + assert positions == [p.start_pos for p in tokens] + + +@pytest.mark.parametrize( + 'code', [ + dedent("""\ + f'''s{ + str.uppe + ''' + """), + 'f"foo', + 'f"""foo', + 'f"abc\ndef"', + ] +) +def test_roundtrip(grammar, code): + tree = grammar.parse(code) + assert tree.get_code() == code diff --git a/bundle/jedi-vim/pythonx/parso/test/test_get_code.py b/bundle/jedi-vim/pythonx/parso/test/test_get_code.py new file mode 100644 index 000000000..d99d792b9 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/test_get_code.py @@ -0,0 +1,133 @@ +import difflib + +import pytest + +from parso import parse + +code_basic_features = ''' +"""A mod docstring""" + +def a_function(a_argument, a_default = "default"): + """A func docstring""" + + a_result = 3 * a_argument + print(a_result) # a comment + b = """ +from +to""" + "huhu" + + + if a_default == "default": + return str(a_result) + else + return None +''' + + +def diff_code_assert(a, b, n=4): + if a != b: + diff = "\n".join(difflib.unified_diff( + a.splitlines(), + b.splitlines(), + n=n, + lineterm="" + )) + assert False, "Code does not match:\n%s\n\ncreated code:\n%s" % ( + diff, + b + ) + pass + + +def test_basic_parsing(): + """Validate the parsing features""" + + m = parse(code_basic_features) + diff_code_assert( + code_basic_features, + m.get_code() + ) + + +def test_operators(): + src = '5 * 3' + module = parse(src) 
+ diff_code_assert(src, module.get_code()) + + +def test_get_code(): + """Use the same code that the parser also generates, to compare""" + s = '''"""a docstring""" +class SomeClass(object, mixin): + def __init__(self): + self.xy = 3.0 + """statement docstr""" + def some_method(self): + return 1 + def yield_method(self): + while hasattr(self, 'xy'): + yield True + for x in [1, 2]: + yield x + def empty(self): + pass +class Empty: + pass +class WithDocstring: + """class docstr""" + pass +def method_with_docstring(): + """class docstr""" + pass +''' + assert parse(s).get_code() == s + + +def test_end_newlines(): + """ + The Python grammar explicitly needs a newline at the end. Jedi though still + wants to be able, to return the exact same code without the additional new + line the parser needs. + """ + def test(source, end_pos): + module = parse(source) + assert module.get_code() == source + assert module.end_pos == end_pos + + test('a', (1, 1)) + test('a\n', (2, 0)) + test('a\nb', (2, 1)) + test('a\n#comment\n', (3, 0)) + test('a\n#comment', (2, 8)) + test('a#comment', (1, 9)) + test('def a():\n pass', (2, 5)) + + test('def a(', (1, 6)) + + +@pytest.mark.parametrize(('code', 'types'), [ + ('\r', ['endmarker']), + ('\n\r', ['endmarker']) +]) +def test_carriage_return_at_end(code, types): + """ + By adding an artificial newline this created weird side effects for + \r at the end of files. 
+ """ + tree = parse(code) + assert tree.get_code() == code + assert [c.type for c in tree.children] == types + assert tree.end_pos == (len(code) + 1, 0) + + +@pytest.mark.parametrize('code', [ + ' ', + ' F"""', + ' F"""\n', + ' F""" \n', + ' F""" \n3', + ' f"""\n"""', + ' f"""\n"""\n', +]) +def test_full_code_round_trip(code): + assert parse(code).get_code() == code diff --git a/bundle/jedi-vim/pythonx/parso/test/test_grammar.py b/bundle/jedi-vim/pythonx/parso/test/test_grammar.py new file mode 100644 index 000000000..60a249b8f --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/test_grammar.py @@ -0,0 +1,8 @@ +import parso + +import pytest + + +def test_non_unicode(): + with pytest.raises(UnicodeDecodeError): + parso.parse(b'\xe4') diff --git a/bundle/jedi-vim/pythonx/parso/test/test_load_grammar.py b/bundle/jedi-vim/pythonx/parso/test/test_load_grammar.py new file mode 100644 index 000000000..0ea648eb3 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/test_load_grammar.py @@ -0,0 +1,31 @@ +import pytest +from parso.grammar import load_grammar +from parso import utils + + +def test_load_inexisting_grammar(): + # This version shouldn't be out for a while, but if we ever do, wow! + with pytest.raises(NotImplementedError): + load_grammar(version='15.8') + # The same is true for very old grammars (even though this is probably not + # going to be an issue. 
+ with pytest.raises(NotImplementedError): + load_grammar(version='1.5') + + +@pytest.mark.parametrize(('string', 'result'), [ + ('2', (2, 7)), ('3', (3, 6)), ('1.1', (1, 1)), ('1.1.1', (1, 1)), ('300.1.31', (300, 1)) +]) +def test_parse_version(string, result): + assert utils._parse_version(string) == result + + +@pytest.mark.parametrize('string', ['1.', 'a', '#', '1.3.4.5']) +def test_invalid_grammar_version(string): + with pytest.raises(ValueError): + load_grammar(version=string) + + +def test_grammar_int_version(): + with pytest.raises(TypeError): + load_grammar(version=3.8) diff --git a/bundle/jedi-vim/pythonx/parso/test/test_normalizer_issues_files.py b/bundle/jedi-vim/pythonx/parso/test/test_normalizer_issues_files.py new file mode 100644 index 000000000..c6a23497e --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/test_normalizer_issues_files.py @@ -0,0 +1,71 @@ +""" +To easily verify if our normalizer raises the right error codes, just use the +tests of pydocstyle. +""" + +import difflib +import re +from functools import total_ordering +from typing import Iterator, Tuple + +import parso +from parso.utils import python_bytes_to_unicode + + +@total_ordering +class WantedIssue: + def __init__(self, code: str, line: int, column: int) -> None: + self.code = code + self._line = line + self._column = column + + def __eq__(self, other): + return self.code == other.code and self.start_pos == other.start_pos + + def __lt__(self, other: 'WantedIssue') -> bool: + return self.start_pos < other.start_pos or self.code < other.code + + def __hash__(self) -> int: + return hash(str(self.code) + str(self._line) + str(self._column)) + + @property + def start_pos(self) -> Tuple[int, int]: + return self._line, self._column + + +def collect_errors(code: str) -> Iterator[WantedIssue]: + for line_nr, line in enumerate(code.splitlines(), 1): + match = re.match(r'(\s*)#: (.*)$', line) + if match is not None: + codes = match.group(2) + for code in codes.split(): + code, _, 
add_indent = code.partition(':') + column = int(add_indent or len(match.group(1))) + + code, _, add_line = code.partition('+') + ln = line_nr + 1 + int(add_line or 0) + + yield WantedIssue(code[1:], ln, column) + + +def test_normalizer_issue(normalizer_issue_case): + def sort(issues): + issues = sorted(issues, key=lambda i: (i.start_pos, i.code)) + return ["(%s, %s): %s" % (i.start_pos[0], i.start_pos[1], i.code) + for i in issues] + + with open(normalizer_issue_case.path, 'rb') as f: + code = python_bytes_to_unicode(f.read()) + + desired = sort(collect_errors(code)) + + grammar = parso.load_grammar(version=normalizer_issue_case.python_version) + module = grammar.parse(code) + issues = grammar._get_normalizer_issues(module) + actual = sort(issues) + + diff = '\n'.join(difflib.ndiff(desired, actual)) + # To make the pytest -v diff a bit prettier, stop pytest to rewrite assert + # statements by executing the comparison earlier. + _bool = desired == actual + assert _bool, '\n' + diff diff --git a/bundle/jedi-vim/pythonx/parso/test/test_old_fast_parser.py b/bundle/jedi-vim/pythonx/parso/test/test_old_fast_parser.py new file mode 100644 index 000000000..6f332cfc5 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/test_old_fast_parser.py @@ -0,0 +1,209 @@ +""" +These tests test the cases that the old fast parser tested with the normal +parser. + +The old fast parser doesn't exist anymore and was replaced with a diff parser. +However the tests might still be relevant for the parser. 
+""" + +from textwrap import dedent + +from parso import parse + + +def test_carriage_return_splitting(): + source = dedent(''' + + + + "string" + + class Foo(): + pass + ''') + source = source.replace('\n', '\r\n') + module = parse(source) + assert [n.value for lst in module.get_used_names().values() for n in lst] == ['Foo'] + + +def check_p(src, number_parsers_used, number_of_splits=None, number_of_misses=0): + if number_of_splits is None: + number_of_splits = number_parsers_used + + module_node = parse(src) + + assert src == module_node.get_code() + return module_node + + +def test_for(): + src = dedent("""\ + for a in [1,2]: + a + + for a1 in 1,"": + a1 + """) + check_p(src, 1) + + +def test_class_with_class_var(): + src = dedent("""\ + class SuperClass: + class_super = 3 + def __init__(self): + self.foo = 4 + pass + """) + check_p(src, 3) + + +def test_func_with_if(): + src = dedent("""\ + def recursion(a): + if foo: + return recursion(a) + else: + if bar: + return inexistent + else: + return a + """) + check_p(src, 1) + + +def test_decorator(): + src = dedent("""\ + class Decorator(): + @memoize + def dec(self, a): + return a + """) + check_p(src, 2) + + +def test_nested_funcs(): + src = dedent("""\ + def memoize(func): + def wrapper(*args, **kwargs): + return func(*args, **kwargs) + return wrapper + """) + check_p(src, 3) + + +def test_multi_line_params(): + src = dedent("""\ + def x(a, + b): + pass + + foo = 1 + """) + check_p(src, 2) + + +def test_class_func_if(): + src = dedent("""\ + class Class: + def func(self): + if 1: + a + else: + b + + pass + """) + check_p(src, 3) + + +def test_multi_line_for(): + src = dedent("""\ + for x in [1, + 2]: + pass + + pass + """) + check_p(src, 1) + + +def test_wrong_indentation(): + src = dedent("""\ + def func(): + a + b + a + """) + check_p(src, 1) + + src = dedent("""\ + def complex(): + def nested(): + a + b + a + + def other(): + pass + """) + check_p(src, 3) + + +def test_strange_parentheses(): + src = 
dedent(""" + class X(): + a = (1 + if 1 else 2) + def x(): + pass + """) + check_p(src, 2) + + +def test_fake_parentheses(): + """ + The fast parser splitting counts parentheses, but not as correct tokens. + Therefore parentheses in string tokens are included as well. This needs to + be accounted for. + """ + src = dedent(r""" + def x(): + a = (')' + if 1 else 2) + def y(): + pass + def z(): + pass + """) + check_p(src, 3, 2, 1) + + +def test_additional_indent(): + source = dedent('''\ + int( + def x(): + pass + ''') + + check_p(source, 2) + + +def test_round_trip(): + code = dedent(''' + def x(): + """hahaha""" + func''') + + assert parse(code).get_code() == code + + +def test_parentheses_in_string(): + code = dedent(''' + def x(): + '(' + + import abc + + abc.''') + check_p(code, 2, 1, 1) diff --git a/bundle/jedi-vim/pythonx/parso/test/test_param_splitting.py b/bundle/jedi-vim/pythonx/parso/test/test_param_splitting.py new file mode 100644 index 000000000..3ea5f1653 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/test_param_splitting.py @@ -0,0 +1,47 @@ +''' +To make the life of any analysis easier, we are generating Param objects +instead of simple parser objects. 
+''' + +from textwrap import dedent + +from parso import parse + + +def assert_params(param_string, **wanted_dct): + source = dedent(''' + def x(%s): + pass + ''') % param_string + + module = parse(source) + funcdef = next(module.iter_funcdefs()) + dct = dict((p.name.value, p.default and p.default.get_code()) + for p in funcdef.get_params()) + assert dct == wanted_dct + assert module.get_code() == source + + +def test_split_params_with_separation_star(): + assert_params('x, y=1, *, z=3', x=None, y='1', z='3') + assert_params('*, x', x=None) + assert_params('*') + + +def test_split_params_with_stars(): + assert_params('x, *args', x=None, args=None) + assert_params('**kwargs', kwargs=None) + assert_params('*args, **kwargs', args=None, kwargs=None) + + +def test_kw_only_no_kw(works_in_py): + """ + Parsing this should be working. In CPython the parser also parses this and + in a later step the AST complains. + """ + module = works_in_py.parse('def test(arg, *):\n pass') + if module is not None: + func = module.children[0] + open_, p1, asterisk, close = func._get_param_nodes() + assert p1.get_code('arg,') + assert asterisk.value == '*' diff --git a/bundle/jedi-vim/pythonx/parso/test/test_parser.py b/bundle/jedi-vim/pythonx/parso/test/test_parser.py new file mode 100644 index 000000000..e087b0d55 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/test_parser.py @@ -0,0 +1,208 @@ +# -*- coding: utf-8 -*- +from textwrap import dedent + +import pytest + +from parso import parse +from parso.python import tree +from parso.utils import split_lines + + +def test_basic_parsing(each_version): + def compare(string): + """Generates the AST object and then regenerates the code.""" + assert parse(string, version=each_version).get_code() == string + + compare('\na #pass\n') + compare('wblabla* 1\t\n') + compare('def x(a, b:3): pass\n') + compare('assert foo\n') + + +def test_subscope_names(each_version): + def get_sub(source): + return parse(source, 
version=each_version).children[0] + + name = get_sub('class Foo: pass').name + assert name.start_pos == (1, len('class ')) + assert name.end_pos == (1, len('class Foo')) + assert name.value == 'Foo' + + name = get_sub('def foo(): pass').name + assert name.start_pos == (1, len('def ')) + assert name.end_pos == (1, len('def foo')) + assert name.value == 'foo' + + +def test_import_names(each_version): + def get_import(source): + return next(parse(source, version=each_version).iter_imports()) + + imp = get_import('import math\n') + names = imp.get_defined_names() + assert len(names) == 1 + assert names[0].value == 'math' + assert names[0].start_pos == (1, len('import ')) + assert names[0].end_pos == (1, len('import math')) + + assert imp.start_pos == (1, 0) + assert imp.end_pos == (1, len('import math')) + + +def test_end_pos(each_version): + s = dedent(''' + x = ['a', 'b', 'c'] + def func(): + y = None + ''') + parser = parse(s, version=each_version) + scope = next(parser.iter_funcdefs()) + assert scope.start_pos == (3, 0) + assert scope.end_pos == (5, 0) + + +def test_carriage_return_statements(each_version): + source = dedent(''' + foo = 'ns1!' + + # this is a namespace package + ''') + source = source.replace('\n', '\r\n') + stmt = parse(source, version=each_version).children[0] + assert '#' not in stmt.get_code() + + +def test_incomplete_list_comprehension(each_version): + """ Shouldn't raise an error, same bug as #418. """ + # With the old parser this actually returned a statement. With the new + # parser only valid statements generate one. 
+ children = parse('(1 for def', version=each_version).children + assert [c.type for c in children] == \ + ['error_node', 'error_node', 'endmarker'] + + +def test_newline_positions(each_version): + endmarker = parse('a\n', version=each_version).children[-1] + assert endmarker.end_pos == (2, 0) + new_line = endmarker.get_previous_leaf() + assert new_line.start_pos == (1, 1) + assert new_line.end_pos == (2, 0) + + +def test_end_pos_error_correction(each_version): + """ + Source code without ending newline are given one, because the Python + grammar needs it. However, they are removed again. We still want the right + end_pos, even if something breaks in the parser (error correction). + """ + s = 'def x():\n .' + m = parse(s, version=each_version) + func = m.children[0] + assert func.type == 'funcdef' + assert func.end_pos == (2, 2) + assert m.end_pos == (2, 2) + + +def test_param_splitting(each_version): + """ + Jedi splits parameters into params, this is not what the grammar does, + but Jedi does this to simplify argument parsing. + """ + def check(src, result): + m = parse(src, version=each_version) + assert not list(m.iter_funcdefs()) + + check('def x(a, (b, c)):\n pass', ['a']) + check('def x((b, c)):\n pass', []) + + +def test_unicode_string(): + s = tree.String(None, 'bö', (0, 0)) + assert repr(s) # Should not raise an Error! + + +def test_backslash_dos_style(each_version): + assert parse('\\\r\n', version=each_version) + + +def test_started_lambda_stmt(each_version): + m = parse('lambda a, b: a i', version=each_version) + assert m.children[0].type == 'error_node' + + +@pytest.mark.parametrize('code', ['foo "', 'foo """\n', 'foo """\nbar']) +def test_open_string_literal(each_version, code): + """ + Testing mostly if removing the last newline works. 
+ """ + lines = split_lines(code, keepends=True) + end_pos = (len(lines), len(lines[-1])) + module = parse(code, version=each_version) + assert module.get_code() == code + assert module.end_pos == end_pos == module.children[1].end_pos + + +def test_too_many_params(): + with pytest.raises(TypeError): + parse('asdf', hello=3) + + +def test_dedent_at_end(each_version): + code = dedent(''' + for foobar in [1]: + foobar''') + module = parse(code, version=each_version) + assert module.get_code() == code + suite = module.children[0].children[-1] + foobar = suite.children[-1] + assert foobar.type == 'name' + + +def test_no_error_nodes(each_version): + def check(node): + assert node.type not in ('error_leaf', 'error_node') + + try: + children = node.children + except AttributeError: + pass + else: + for child in children: + check(child) + + check(parse("if foo:\n bar", version=each_version)) + + +def test_named_expression(works_ge_py38): + works_ge_py38.parse("(a := 1, a + 1)") + + +def test_extended_rhs_annassign(works_ge_py38): + works_ge_py38.parse("x: y = z,") + works_ge_py38.parse("x: Tuple[int, ...] = z, *q, w") + + +@pytest.mark.parametrize( + 'param_code', [ + 'a=1, /', + 'a, /', + 'a=1, /, b=3', + 'a, /, b', + 'a, /, b', + 'a, /, *, b', + 'a, /, **kwargs', + ] +) +def test_positional_only_arguments(works_ge_py38, param_code): + works_ge_py38.parse("def x(%s): pass" % param_code) + + +@pytest.mark.parametrize( + 'expression', [ + 'a + a', + 'lambda x: x', + 'a := lambda x: x' + ] +) +def test_decorator_expression(works_ge_py39, expression): + works_ge_py39.parse("@%s\ndef x(): pass" % expression) diff --git a/bundle/jedi-vim/pythonx/parso/test/test_parser_tree.py b/bundle/jedi-vim/pythonx/parso/test/test_parser_tree.py new file mode 100644 index 000000000..b994b9bbb --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/test_parser_tree.py @@ -0,0 +1,266 @@ +# -*- coding: utf-8 # This file contains Unicode characters. 
+ +from textwrap import dedent + +import pytest + +from parso import parse +from parso.python import tree +from parso.tree import search_ancestor + + +class TestsFunctionAndLambdaParsing: + + FIXTURES = [ + ('def my_function(x, y, z) -> str:\n return x + y * z\n', { + 'name': 'my_function', + 'call_sig': 'my_function(x, y, z)', + 'params': ['x', 'y', 'z'], + 'annotation': "str", + }), + ('lambda x, y, z: x + y * z\n', { + 'name': '', + 'call_sig': '(x, y, z)', + 'params': ['x', 'y', 'z'], + }), + ] + + @pytest.fixture(params=FIXTURES) + def node(self, request): + parsed = parse(dedent(request.param[0]), version='3.10') + request.keywords['expected'] = request.param[1] + child = parsed.children[0] + if child.type == 'simple_stmt': + child = child.children[0] + return child + + @pytest.fixture() + def expected(self, request, node): + return request.keywords['expected'] + + def test_name(self, node, expected): + if node.type != 'lambdef': + assert isinstance(node.name, tree.Name) + assert node.name.value == expected['name'] + + def test_params(self, node, expected): + assert isinstance(node.get_params(), list) + assert all(isinstance(x, tree.Param) for x in node.get_params()) + assert [str(x.name.value) for x in node.get_params()] == [x for x in expected['params']] + + def test_is_generator(self, node, expected): + assert node.is_generator() is expected.get('is_generator', False) + + def test_yields(self, node, expected): + assert node.is_generator() == expected.get('yields', False) + + def test_annotation(self, node, expected): + expected_annotation = expected.get('annotation', None) + if expected_annotation is None: + assert node.annotation is None + else: + assert node.annotation.value == expected_annotation + + +def test_end_pos_line(each_version): + # jedi issue #150 + s = "x()\nx( )\nx( )\nx ( )\n" + + module = parse(s, version=each_version) + for i, simple_stmt in enumerate(module.children[:-1]): + expr_stmt = simple_stmt.children[0] + assert expr_stmt.end_pos 
== (i + 1, i + 3) + + +def test_default_param(each_version): + func = parse('def x(foo=42): pass', version=each_version).children[0] + param, = func.get_params() + assert param.default.value == '42' + assert param.annotation is None + assert not param.star_count + + +def test_annotation_param(each_version): + func = parse('def x(foo: 3): pass', version=each_version).children[0] + param, = func.get_params() + assert param.default is None + assert param.annotation.value == '3' + assert not param.star_count + + +def test_annotation_params(each_version): + func = parse('def x(foo: 3, bar: 4): pass', version=each_version).children[0] + param1, param2 = func.get_params() + + assert param1.default is None + assert param1.annotation.value == '3' + assert not param1.star_count + + assert param2.default is None + assert param2.annotation.value == '4' + assert not param2.star_count + + +def test_default_and_annotation_param(each_version): + func = parse('def x(foo:3=42): pass', version=each_version).children[0] + param, = func.get_params() + assert param.default.value == '42' + assert param.annotation.value == '3' + assert not param.star_count + + +def get_yield_exprs(code, version): + return list(parse(code, version=version).children[0].iter_yield_exprs()) + + +def get_return_stmts(code): + return list(parse(code).children[0].iter_return_stmts()) + + +def get_raise_stmts(code, child): + return list(parse(code).children[child].iter_raise_stmts()) + + +def test_yields(each_version): + y, = get_yield_exprs('def x(): yield', each_version) + assert y.value == 'yield' + assert y.type == 'keyword' + + y, = get_yield_exprs('def x(): (yield 1)', each_version) + assert y.type == 'yield_expr' + + y, = get_yield_exprs('def x(): [1, (yield)]', each_version) + assert y.type == 'keyword' + + +def test_yield_from(): + y, = get_yield_exprs('def x(): (yield from 1)', '3.8') + assert y.type == 'yield_expr' + + +def test_returns(): + r, = get_return_stmts('def x(): return') + assert r.value == 
'return' + assert r.type == 'keyword' + + r, = get_return_stmts('def x(): return 1') + assert r.type == 'return_stmt' + + +def test_raises(): + code = """ +def single_function(): + raise Exception +def top_function(): + def inner_function(): + raise NotImplementedError() + inner_function() + raise Exception +def top_function_three(): + try: + raise NotImplementedError() + except NotImplementedError: + pass + raise Exception + """ + + r = get_raise_stmts(code, 0) # Lists in a simple Function + assert len(list(r)) == 1 + + r = get_raise_stmts(code, 1) # Doesn't Exceptions list in closures + assert len(list(r)) == 1 + + r = get_raise_stmts(code, 2) # Lists inside try-catch + assert len(list(r)) == 2 + + +@pytest.mark.parametrize( + 'code, name_index, is_definition, include_setitem', [ + ('x = 3', 0, True, False), + ('x.y = 3', 0, False, False), + ('x.y = 3', 1, True, False), + ('x.y = u.v = z', 0, False, False), + ('x.y = u.v = z', 1, True, False), + ('x.y = u.v = z', 2, False, False), + ('x.y = u.v, w = z', 3, True, False), + ('x.y = u.v, w = z', 4, True, False), + ('x.y = u.v, w = z', 5, False, False), + + ('x, y = z', 0, True, False), + ('x, y = z', 1, True, False), + ('x, y = z', 2, False, False), + ('x, y = z', 2, False, False), + ('x[0], y = z', 2, False, False), + ('x[0] = z', 0, False, False), + ('x[0], y = z', 0, False, False), + ('x[0], y = z', 2, False, True), + ('x[0] = z', 0, True, True), + ('x[0], y = z', 0, True, True), + ('x: int = z', 0, True, False), + ('x: int = z', 1, False, False), + ('x: int = z', 2, False, False), + ('x: int', 0, True, False), + ('x: int', 1, False, False), + ] +) +def test_is_definition(code, name_index, is_definition, include_setitem): + module = parse(code, version='3.8') + name = module.get_first_leaf() + while True: + if name.type == 'name': + if name_index == 0: + break + name_index -= 1 + name = name.get_next_leaf() + + assert name.is_definition(include_setitem=include_setitem) == is_definition + + +def 
test_iter_funcdefs(): + code = dedent(''' + def normal(): ... + async def asyn(): ... + @dec + def dec_normal(): ... + @dec1 + @dec2 + async def dec_async(): ... + def broken + ''') + module = parse(code, version='3.8') + func_names = [f.name.value for f in module.iter_funcdefs()] + assert func_names == ['normal', 'asyn', 'dec_normal', 'dec_async'] + + +def test_with_stmt_get_test_node_from_name(): + code = "with A as X.Y, B as (Z), C as Q[0], D as Q['foo']: pass" + with_stmt = parse(code, version='3').children[0] + tests = [ + with_stmt.get_test_node_from_name(name).value + for name in with_stmt.get_defined_names(include_setitem=True) + ] + assert tests == ["A", "B", "C", "D"] + + +sample_module = parse('x + y') +sample_node = sample_module.children[0] +sample_leaf = sample_node.children[0] + + +@pytest.mark.parametrize( + 'node,node_types,expected_ancestor', [ + (sample_module, ('file_input',), None), + (sample_node, ('arith_expr',), None), + (sample_node, ('file_input', 'eval_input'), sample_module), + (sample_leaf, ('name',), None), + (sample_leaf, ('arith_expr',), sample_node), + (sample_leaf, ('file_input',), sample_module), + (sample_leaf, ('file_input', 'arith_expr'), sample_node), + (sample_leaf, ('shift_expr',), None), + (sample_leaf, ('name', 'shift_expr',), None), + (sample_leaf, (), None), + ] +) +def test_search_ancestor(node, node_types, expected_ancestor): + assert node.search_ancestor(*node_types) is expected_ancestor + assert search_ancestor(node, *node_types) is expected_ancestor # deprecated diff --git a/bundle/jedi-vim/pythonx/parso/test/test_pep8.py b/bundle/jedi-vim/pythonx/parso/test/test_pep8.py new file mode 100644 index 000000000..06cffb4af --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/test_pep8.py @@ -0,0 +1,42 @@ +import parso + + +def issues(code): + grammar = parso.load_grammar() + module = parso.parse(code) + return grammar._get_normalizer_issues(module) + + +def test_eof_newline(): + def assert_issue(code): + found = 
issues(code) + assert len(found) == 1 + issue, = found + assert issue.code == 292 + + assert not issues('asdf = 1\n') + assert not issues('asdf = 1\r\n') + assert not issues('asdf = 1\r') + assert_issue('asdf = 1') + assert_issue('asdf = 1\n# foo') + assert_issue('# foobar') + assert_issue('') + assert_issue('foo = 1 # comment') + + +def test_eof_blankline(): + def assert_issue(code): + found = issues(code) + assert len(found) == 1 + issue, = found + assert issue.code == 391 + + assert_issue('asdf = 1\n\n') + assert_issue('# foobar\n\n') + assert_issue('\n\n') + + +def test_shebang(): + assert not issues('#!\n') + assert not issues('#!/foo\n') + assert not issues('#! python\n') diff --git a/bundle/jedi-vim/pythonx/parso/test/test_pgen2.py b/bundle/jedi-vim/pythonx/parso/test/test_pgen2.py new file mode 100644 index 000000000..85ccacfb4 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/test_pgen2.py @@ -0,0 +1,357 @@ +from textwrap import dedent + +import pytest + +from parso import load_grammar +from parso import ParserSyntaxError +from parso.pgen2 import generate_grammar +from parso.python import tokenize + + +def _parse(code, version=None): + code = dedent(code) + "\n\n" + grammar = load_grammar(version=version) + return grammar.parse(code, error_recovery=False) + + +def _invalid_syntax(code, version=None, **kwargs): + with pytest.raises(ParserSyntaxError): + module = _parse(code, version=version, **kwargs) + # For debugging + print(module.children) + + +def test_formfeed(each_version): + s = "foo\n\x0c\nfoo\n" + t = _parse(s, each_version) + assert t.children[0].children[0].type == 'name' + assert t.children[1].children[0].type == 'name' + s = "1\n\x0c\x0c\n2\n" + t = _parse(s, each_version) + + with pytest.raises(ParserSyntaxError): + s = "\n\x0c2\n" + _parse(s, each_version) + + +def test_matrix_multiplication_operator(works_in_py): + works_in_py.parse("a @ b") + works_in_py.parse("a @= b") + + +def test_yield_from(works_in_py, each_version): + 
works_in_py.parse("yield from x") + works_in_py.parse("(yield from x) + y") + _invalid_syntax("yield from", each_version) + + +def test_await_expr(works_in_py): + works_in_py.parse("""async def foo(): + await x + """) + + works_in_py.parse("""async def foo(): + + def foo(): pass + + def foo(): pass + + await x + """) + + works_in_py.parse("""async def foo(): return await a""") + + works_in_py.parse("""def foo(): + def foo(): pass + async def foo(): await x + """) + + +@pytest.mark.parametrize( + 'code', [ + "async = 1", + "await = 1", + "def async(): pass", + ] +) +def test_async_var(works_not_in_py, code): + works_not_in_py.parse(code) + + +def test_async_for(works_in_py): + works_in_py.parse("async def foo():\n async for a in b: pass") + + +@pytest.mark.parametrize("body", [ + """[1 async for a in b + ]""", + """[1 async + for a in b + ]""", + """[ + 1 + async for a in b + ]""", + """[ + 1 + async for a + in b + ]""", + """[ + 1 + async + for + a + in + b + ]""", + """ [ + 1 async for a in b + ]""", +]) +def test_async_for_comprehension_newline(works_in_py, body): + # Issue #139 + works_in_py.parse("""async def foo(): + {}""".format(body)) + + +def test_async_with(works_in_py): + works_in_py.parse("async def foo():\n async with a: pass") + + +def test_async_with_invalid(works_in_py): + works_in_py.parse("""def foo():\n async with a: pass""") + + +def test_raise_3x_style_1(each_version): + _parse("raise", each_version) + + +def test_raise_2x_style_2(works_not_in_py): + works_not_in_py.parse("raise E, V") + + +def test_raise_2x_style_3(works_not_in_py): + works_not_in_py.parse("raise E, V, T") + + +def test_raise_2x_style_invalid_1(each_version): + _invalid_syntax("raise E, V, T, Z", version=each_version) + + +def test_raise_3x_style(works_in_py): + works_in_py.parse("raise E1 from E2") + + +def test_raise_3x_style_invalid_1(each_version): + _invalid_syntax("raise E, V from E1", each_version) + + +def test_raise_3x_style_invalid_2(each_version): + 
_invalid_syntax("raise E from E1, E2", each_version) + + +def test_raise_3x_style_invalid_3(each_version): + _invalid_syntax("raise from E1, E2", each_version) + + +def test_raise_3x_style_invalid_4(each_version): + _invalid_syntax("raise E from", each_version) + + +# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testFuncdef +def test_annotation_1(works_in_py): + works_in_py.parse("""def f(x) -> list: pass""") + + +def test_annotation_2(works_in_py): + works_in_py.parse("""def f(x:int): pass""") + + +def test_annotation_3(works_in_py): + works_in_py.parse("""def f(*x:str): pass""") + + +def test_annotation_4(works_in_py): + works_in_py.parse("""def f(**x:float): pass""") + + +def test_annotation_5(works_in_py): + works_in_py.parse("""def f(x, y:1+2): pass""") + + +def test_annotation_6(each_version): + _invalid_syntax("""def f(a, (b:1, c:2, d)): pass""", each_version) + + +def test_annotation_7(each_version): + _invalid_syntax("""def f(a, (b:1, c:2, d), e:3=4, f=5, *g:6): pass""", each_version) + + +def test_annotation_8(each_version): + s = """def f(a, (b:1, c:2, d), e:3=4, f=5, + *g:6, h:7, i=8, j:9=10, **k:11) -> 12: pass""" + _invalid_syntax(s, each_version) + + +def test_except_new(each_version): + s = dedent(""" + try: + x + except E as N: + y""") + _parse(s, each_version) + + +def test_except_old(works_not_in_py): + s = dedent(""" + try: + x + except E, N: + y""") + works_not_in_py.parse(s) + + +# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testAtoms +def test_set_literal_1(works_in_py): + works_in_py.parse("""x = {'one'}""") + + +def test_set_literal_2(works_in_py): + works_in_py.parse("""x = {'one', 1,}""") + + +def test_set_literal_3(works_in_py): + works_in_py.parse("""x = {'one', 'two', 'three'}""") + + +def test_set_literal_4(works_in_py): + works_in_py.parse("""x = {2, 3, 4,}""") + + +def test_new_octal_notation(each_version): + _parse("""0o7777777777777""", each_version) + _invalid_syntax("""0o7324528887""", 
each_version) + + +def test_old_octal_notation(works_not_in_py): + works_not_in_py.parse("07") + + +def test_long_notation(works_not_in_py): + works_not_in_py.parse("0xFl") + works_not_in_py.parse("0xFL") + works_not_in_py.parse("0b1l") + works_not_in_py.parse("0B1L") + works_not_in_py.parse("0o7l") + works_not_in_py.parse("0O7L") + works_not_in_py.parse("0l") + works_not_in_py.parse("0L") + works_not_in_py.parse("10l") + works_not_in_py.parse("10L") + + +def test_new_binary_notation(each_version): + _parse("""0b101010""", each_version) + _invalid_syntax("""0b0101021""", each_version) + + +def test_class_new_syntax(works_in_py): + works_in_py.parse("class B(t=7): pass") + works_in_py.parse("class B(t, *args): pass") + works_in_py.parse("class B(t, **kwargs): pass") + works_in_py.parse("class B(t, *args, **kwargs): pass") + works_in_py.parse("class B(t, y=9, *args, **kwargs): pass") + + +def test_parser_idempotency_extended_unpacking(works_in_py): + """A cut-down version of pytree_idempotency.py.""" + works_in_py.parse("a, *b, c = x\n") + works_in_py.parse("[*a, b] = x\n") + works_in_py.parse("(z, *y, w) = m\n") + works_in_py.parse("for *z, m in d: pass\n") + + +def test_multiline_bytes_literals(each_version): + s = """ + md5test(b"\xaa" * 80, + (b"Test Using Larger Than Block-Size Key " + b"and Larger Than One Block-Size Data"), + "6f630fad67cda0ee1fb1f562db3aa53e") + """ + _parse(s, each_version) + + +def test_multiline_bytes_tripquote_literals(each_version): + s = ''' + b""" + + + """ + ''' + _parse(s, each_version) + + +def test_ellipsis(works_in_py, each_version): + works_in_py.parse("...") + _parse("[0][...]", version=each_version) + + +def test_dict_unpacking(works_in_py): + works_in_py.parse("{**dict(a=3), foo:2}") + + +def test_multiline_str_literals(each_version): + s = """ + md5test("\xaa" * 80, + ("Test Using Larger Than Block-Size Key " + "and Larger Than One Block-Size Data"), + "6f630fad67cda0ee1fb1f562db3aa53e") + """ + _parse(s, each_version) + + 
+def test_py2_backticks(works_not_in_py): + works_not_in_py.parse("`1`") + + +def test_py2_string_prefixes(works_not_in_py): + works_not_in_py.parse("ur'1'") + works_not_in_py.parse("Ur'1'") + works_not_in_py.parse("UR'1'") + _invalid_syntax("ru'1'", works_not_in_py.version) + + +def py_br(each_version): + _parse('br""', each_version) + + +def test_py3_rb(works_in_py): + works_in_py.parse("rb'1'") + works_in_py.parse("RB'1'") + + +def test_left_recursion(): + with pytest.raises(ValueError, match='left recursion'): + generate_grammar('foo: foo NAME\n', tokenize.PythonTokenTypes) + + +@pytest.mark.parametrize( + 'grammar, error_match', [ + ['foo: bar | baz\nbar: NAME\nbaz: NAME\n', + r"foo is ambiguous.*given a (PythonTokenTypes\.)?NAME.*bar or baz"], + ['''foo: bar | baz\nbar: 'x'\nbaz: "x"\n''', + r"foo is ambiguous.*given a ReservedString\(x\).*bar or baz"], + ['''foo: bar | 'x'\nbar: 'x'\n''', + r"foo is ambiguous.*given a ReservedString\(x\).*bar or foo"], + # An ambiguity with the second (not the first) child of a production + ['outer: "a" [inner] "b" "c"\ninner: "b" "c" [inner]\n', + r"outer is ambiguous.*given a ReservedString\(b\).*inner or outer"], + # An ambiguity hidden by a level of indirection (middle) + ['outer: "a" [middle] "b" "c"\nmiddle: inner\ninner: "b" "c" [inner]\n', + r"outer is ambiguous.*given a ReservedString\(b\).*middle or outer"], + ] +) +def test_ambiguities(grammar, error_match): + with pytest.raises(ValueError, match=error_match): + generate_grammar(grammar, tokenize.PythonTokenTypes) diff --git a/bundle/jedi-vim/pythonx/parso/test/test_prefix.py b/bundle/jedi-vim/pythonx/parso/test/test_prefix.py new file mode 100644 index 000000000..58c1dcf98 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/test_prefix.py @@ -0,0 +1,75 @@ +from itertools import zip_longest +from codecs import BOM_UTF8 + +import pytest + +import parso + +unicode_bom = BOM_UTF8.decode('utf-8') + + +@pytest.mark.parametrize(('string', 'tokens'), [ + ('', ['']), + 
('#', ['#', '']), + (' # ', ['# ', '']), + (' # \n', ['# ', '\n', '']), + (' # \f\n', ['# ', '\f', '\n', '']), + (' \n', ['\n', '']), + (' \n ', ['\n', ' ']), + (' \f ', ['\f', ' ']), + (' \f ', ['\f', ' ']), + (' \r\n', ['\r\n', '']), + (' \r', ['\r', '']), + ('\\\n', ['\\\n', '']), + ('\\\r\n', ['\\\r\n', '']), + ('\t\t\n\t', ['\n', '\t']), +]) +def test_simple_prefix_splitting(string, tokens): + tree = parso.parse(string) + leaf = tree.children[0] + assert leaf.type == 'endmarker' + + parsed_tokens = list(leaf._split_prefix()) + start_pos = (1, 0) + for pt, expected in zip_longest(parsed_tokens, tokens): + assert pt.value == expected + + # Calculate the estimated end_pos + if expected.endswith('\n') or expected.endswith('\r'): + end_pos = start_pos[0] + 1, 0 + else: + end_pos = start_pos[0], start_pos[1] + len(expected) + len(pt.spacing) + + # assert start_pos == pt.start_pos + assert end_pos == pt.end_pos + start_pos = end_pos + + +@pytest.mark.parametrize(('string', 'types'), [ + ('# ', ['comment', 'spacing']), + ('\r\n', ['newline', 'spacing']), + ('\f', ['formfeed', 'spacing']), + ('\\\n', ['backslash', 'spacing']), + (' \t', ['spacing']), + (' \t ', ['spacing']), + (unicode_bom + ' # ', ['bom', 'comment', 'spacing']), +]) +def test_prefix_splitting_types(string, types): + tree = parso.parse(string) + leaf = tree.children[0] + assert leaf.type == 'endmarker' + parsed_tokens = list(leaf._split_prefix()) + assert [t.type for t in parsed_tokens] == types + + +def test_utf8_bom(): + tree = parso.parse(unicode_bom + 'a = 1') + expr_stmt = tree.children[0] + assert expr_stmt.start_pos == (1, 0) + + tree = parso.parse(unicode_bom + '\n') + endmarker = tree.children[0] + parts = list(endmarker._split_prefix()) + assert [p.type for p in parts] == ['bom', 'newline', 'spacing'] + assert [p.start_pos for p in parts] == [(1, 0), (1, 0), (2, 0)] + assert [p.end_pos for p in parts] == [(1, 0), (2, 0), (2, 0)] diff --git 
a/bundle/jedi-vim/pythonx/parso/test/test_python_errors.py b/bundle/jedi-vim/pythonx/parso/test/test_python_errors.py new file mode 100644 index 000000000..adf5f0693 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/test_python_errors.py @@ -0,0 +1,510 @@ +""" +Testing if parso finds syntax errors and indentation errors. +""" +import sys +import warnings + +import pytest + +import parso + +from textwrap import dedent +from parso._compatibility import is_pypy +from .failing_examples import FAILING_EXAMPLES, indent, build_nested + + +if is_pypy: + # The errors in PyPy might be different. Just skip the module for now. + pytestmark = pytest.mark.skip() + + +def _get_error_list(code, version=None): + grammar = parso.load_grammar(version=version) + tree = grammar.parse(code) + return list(grammar.iter_errors(tree)) + + +def assert_comparison(code, error_code, positions): + errors = [(error.start_pos, error.code) for error in _get_error_list(code)] + assert [(pos, error_code) for pos in positions] == errors + + +@pytest.mark.parametrize('code', FAILING_EXAMPLES) +def test_python_exception_matches(code): + wanted, line_nr = _get_actual_exception(code) + + errors = _get_error_list(code) + actual = None + if errors: + error, = errors + actual = error.message + assert actual in wanted + # Somehow in Python2.7 the SyntaxError().lineno is sometimes None + assert line_nr is None or line_nr == error.start_pos[0] + + +def test_non_async_in_async(): + """ + This example doesn't work with FAILING_EXAMPLES, because the line numbers + are not always the same / incorrect in Python 3.8. + """ + # Raises multiple errors in previous versions. 
+ code = 'async def foo():\n def nofoo():[x async for x in []]' + wanted, line_nr = _get_actual_exception(code) + + errors = _get_error_list(code) + if errors: + error, = errors + actual = error.message + assert actual in wanted + if sys.version_info[:2] not in ((3, 8), (3, 9)): + assert line_nr == error.start_pos[0] + else: + assert line_nr == 0 # For whatever reason this is zero in Python 3.8/3.9 + + +@pytest.mark.parametrize( + ('code', 'positions'), [ + ('1 +', [(1, 3)]), + ('1 +\n', [(1, 3)]), + ('1 +\n2 +', [(1, 3), (2, 3)]), + ('x + 2', []), + ('[\n', [(2, 0)]), + ('[\ndef x(): pass', [(2, 0)]), + ('[\nif 1: pass', [(2, 0)]), + ('1+?', [(1, 2)]), + ('?', [(1, 0)]), + ('??', [(1, 0)]), + ('? ?', [(1, 0)]), + ('?\n?', [(1, 0), (2, 0)]), + ('? * ?', [(1, 0)]), + ('1 + * * 2', [(1, 4)]), + ('?\n1\n?', [(1, 0), (3, 0)]), + ] +) +def test_syntax_errors(code, positions): + assert_comparison(code, 901, positions) + + +@pytest.mark.parametrize( + ('code', 'positions'), [ + (' 1', [(1, 0)]), + ('def x():\n 1\n 2', [(3, 0)]), + ('def x():\n 1\n 2', [(3, 0)]), + ('def x():\n1', [(2, 0)]), + ] +) +def test_indentation_errors(code, positions): + assert_comparison(code, 903, positions) + + +def _get_actual_exception(code): + with warnings.catch_warnings(): + # We don't care about warnings where locals/globals misbehave here. + # It's as simple as either an error or not. + warnings.filterwarnings('ignore', category=SyntaxWarning) + try: + compile(code, '', 'exec') + except (SyntaxError, IndentationError) as e: + wanted = e.__class__.__name__ + ': ' + e.msg + line_nr = e.lineno + except ValueError as e: + # The ValueError comes from byte literals in Python 2 like '\x' + # that are oddly enough not SyntaxErrors. + wanted = 'SyntaxError: (value error) ' + str(e) + line_nr = None + else: + assert False, "The piece of code should raise an exception." 
+ + # SyntaxError + if wanted == 'SyntaxError: assignment to keyword': + return [wanted, "SyntaxError: can't assign to keyword", + 'SyntaxError: cannot assign to __debug__'], line_nr + elif wanted == 'SyntaxError: f-string: unterminated string': + wanted = 'SyntaxError: EOL while scanning string literal' + elif wanted == 'SyntaxError: f-string expression part cannot include a backslash': + return [ + wanted, + "SyntaxError: EOL while scanning string literal", + "SyntaxError: unexpected character after line continuation character", + ], line_nr + elif wanted == "SyntaxError: f-string: expecting '}'": + wanted = 'SyntaxError: EOL while scanning string literal' + elif wanted == 'SyntaxError: f-string: empty expression not allowed': + wanted = 'SyntaxError: invalid syntax' + elif wanted == "SyntaxError: f-string expression part cannot include '#'": + wanted = 'SyntaxError: invalid syntax' + elif wanted == "SyntaxError: f-string: single '}' is not allowed": + wanted = 'SyntaxError: invalid syntax' + return [wanted], line_nr + + +def test_default_except_error_postition(): + # For this error the position seemed to be one line off in Python < 3.10, + # but that doesn't really matter. + code = 'try: pass\nexcept: pass\nexcept X: pass' + wanted, line_nr = _get_actual_exception(code) + error, = _get_error_list(code) + assert error.message in wanted + if sys.version_info[:2] >= (3, 10): + assert line_nr == error.start_pos[0] + else: + assert line_nr != error.start_pos[0] + # I think this is the better position. 
+ assert error.start_pos[0] == 2 + + +def test_statically_nested_blocks(): + def build(code, depth): + if depth == 0: + return code + + new_code = 'if 1:\n' + indent(code) + return build(new_code, depth - 1) + + def get_error(depth, add_func=False): + code = build('foo', depth) + if add_func: + code = 'def bar():\n' + indent(code) + errors = _get_error_list(code) + if errors: + assert errors[0].message == 'SyntaxError: too many statically nested blocks' + return errors[0] + return None + + assert get_error(19) is None + assert get_error(19, add_func=True) is None + + assert get_error(20) + assert get_error(20, add_func=True) + + +def test_future_import_first(): + def is_issue(code, *args, **kwargs): + code = code % args + return bool(_get_error_list(code, **kwargs)) + + i1 = 'from __future__ import division' + i2 = 'from __future__ import absolute_import' + i3 = 'from __future__ import annotations' + assert not is_issue(i1) + assert not is_issue(i1 + ';' + i2) + assert not is_issue(i1 + '\n' + i2) + assert not is_issue('"";' + i1) + assert not is_issue('"";' + i1) + assert not is_issue('""\n' + i1) + assert not is_issue('""\n%s\n%s', i1, i2) + assert not is_issue('""\n%s;%s', i1, i2) + assert not is_issue('"";%s;%s ', i1, i2) + assert not is_issue('"";%s\n%s ', i1, i2) + assert not is_issue(i3, version="3.7") + assert is_issue(i3, version="3.6") + assert is_issue('1;' + i1) + assert is_issue('1\n' + i1) + assert is_issue('"";1\n' + i1) + assert is_issue('""\n%s\nfrom x import a\n%s', i1, i2) + assert is_issue('%s\n""\n%s', i1, i2) + + +def test_named_argument_issues(works_not_in_py): + message = works_not_in_py.get_error_message('def foo(*, **dict): pass') + message = works_not_in_py.get_error_message('def foo(*): pass') + if works_not_in_py.version.startswith('2'): + assert message == 'SyntaxError: invalid syntax' + else: + assert message == 'SyntaxError: named arguments must follow bare *' + + works_not_in_py.assert_no_error_in_passing('def foo(*, name): pass') + 
works_not_in_py.assert_no_error_in_passing('def foo(bar, *, name=1): pass') + works_not_in_py.assert_no_error_in_passing('def foo(bar, *, name=1, **dct): pass') + + +def test_escape_decode_literals(each_version): + """ + We are using internal functions to assure that unicode/bytes escaping is + without syntax errors. Here we make a bit of quality assurance that this + works through versions, because the internal function might change over + time. + """ + def get_msg(end, to=1): + base = "SyntaxError: (unicode error) 'unicodeescape' " \ + "codec can't decode bytes in position 0-%s: " % to + return base + end + + def get_msgs(escape): + return (get_msg('end of string in escape sequence'), + get_msg(r"truncated %s escape" % escape)) + + error, = _get_error_list(r'u"\x"', version=each_version) + assert error.message in get_msgs(r'\xXX') + + error, = _get_error_list(r'u"\u"', version=each_version) + assert error.message in get_msgs(r'\uXXXX') + + error, = _get_error_list(r'u"\U"', version=each_version) + assert error.message in get_msgs(r'\UXXXXXXXX') + + error, = _get_error_list(r'u"\N{}"', version=each_version) + assert error.message == get_msg(r'malformed \N character escape', to=2) + + error, = _get_error_list(r'u"\N{foo}"', version=each_version) + assert error.message == get_msg(r'unknown Unicode character name', to=6) + + # Finally bytes. 
+ error, = _get_error_list(r'b"\x"', version=each_version) + wanted = r'SyntaxError: (value error) invalid \x escape at position 0' + assert error.message == wanted + + +def test_too_many_levels_of_indentation(): + assert not _get_error_list(build_nested('pass', 99)) + assert _get_error_list(build_nested('pass', 100)) + base = 'def x():\n if x:\n' + assert not _get_error_list(build_nested('pass', 49, base=base)) + assert _get_error_list(build_nested('pass', 50, base=base)) + + +def test_paren_kwarg(): + assert _get_error_list("print((sep)=seperator)", version="3.8") + assert not _get_error_list("print((sep)=seperator)", version="3.7") + + +@pytest.mark.parametrize( + 'code', [ + "f'{*args,}'", + r'f"\""', + r'f"\\\""', + r'fr"\""', + r'fr"\\\""', + r"print(f'Some {x:.2f} and some {y}')", + # Unparenthesized yield expression + 'def foo(): return f"{yield 1}"', + ] +) +def test_valid_fstrings(code): + assert not _get_error_list(code, version='3.6') + + +@pytest.mark.parametrize( + 'code', [ + 'a = (b := 1)', + '[x4 := x ** 5 for x in range(7)]', + '[total := total + v for v in range(10)]', + 'while chunk := file.read(2):\n pass', + 'numbers = [y := math.factorial(x), y**2, y**3]', + '{(a:="a"): (b:=1)}', + '{(y:=1): 2 for x in range(5)}', + 'a[(b:=0)]', + 'a[(b:=0, c:=0)]', + 'a[(b:=0):1:2]', + ] +) +def test_valid_namedexpr(code): + assert not _get_error_list(code, version='3.8') + + +@pytest.mark.parametrize( + 'code', [ + '{x := 1, 2, 3}', + '{x4 := x ** 5 for x in range(7)}', + ] +) +def test_valid_namedexpr_set(code): + assert not _get_error_list(code, version='3.9') + + +@pytest.mark.parametrize( + 'code', [ + 'a[b:=0]', + 'a[b:=0, c:=0]', + ] +) +def test_valid_namedexpr_index(code): + assert not _get_error_list(code, version='3.10') + + +@pytest.mark.parametrize( + ('code', 'message'), [ + ("f'{1+}'", ('invalid syntax')), + (r'f"\"', ('invalid syntax')), + (r'fr"\"', ('invalid syntax')), + ] +) +def test_invalid_fstrings(code, message): + """ + Some fstring 
errors are handled differntly in 3.6 and other versions. + Therefore check specifically for these errors here. + """ + error, = _get_error_list(code, version='3.6') + assert message in error.message + + +@pytest.mark.parametrize( + 'code', [ + "from foo import (\nbar,\n rab,\n)", + "from foo import (bar, rab, )", + ] +) +def test_trailing_comma(code): + errors = _get_error_list(code) + assert not errors + + +def test_continue_in_finally(): + code = dedent('''\ + for a in [1]: + try: + pass + finally: + continue + ''') + assert not _get_error_list(code, version="3.8") + assert _get_error_list(code, version="3.7") + + +@pytest.mark.parametrize( + 'template', [ + "a, b, {target}, c = d", + "a, b, *{target}, c = d", + "(a, *{target}), c = d", + "for x, {target} in y: pass", + "for x, q, {target} in y: pass", + "for x, q, *{target} in y: pass", + "for (x, *{target}), q in y: pass", + ] +) +@pytest.mark.parametrize( + 'target', [ + "True", + "False", + "None", + "__debug__" + ] +) +def test_forbidden_name(template, target): + assert _get_error_list(template.format(target=target), version="3") + + +def test_repeated_kwarg(): + # python 3.9+ shows which argument is repeated + assert ( + _get_error_list("f(q=1, q=2)", version="3.8")[0].message + == "SyntaxError: keyword argument repeated" + ) + assert ( + _get_error_list("f(q=1, q=2)", version="3.9")[0].message + == "SyntaxError: keyword argument repeated: q" + ) + + +@pytest.mark.parametrize( + ('source', 'no_errors'), [ + ('a(a for a in b,)', False), + ('a(a for a in b, a)', False), + ('a(a, a for a in b)', False), + ('a(a, b, a for a in b, c, d)', False), + ('a(a for a in b)', True), + ('a((a for a in b), c)', True), + ('a(c, (a for a in b))', True), + ('a(a, b, (a for a in b), c, d)', True), + ] +) +def test_unparenthesized_genexp(source, no_errors): + assert bool(_get_error_list(source)) ^ no_errors + + +@pytest.mark.parametrize( + ('source', 'no_errors'), [ + ('*x = 2', False), + ('(*y) = 1', False), + ('((*z)) = 1', 
False), + ('*a,', True), + ('*a, = 1', True), + ('(*a,)', True), + ('(*a,) = 1', True), + ('[*a]', True), + ('[*a] = 1', True), + ('a, *b', True), + ('a, *b = 1', True), + ('a, *b, c', True), + ('a, *b, c = 1', True), + ('a, (*b, c), d', True), + ('a, (*b, c), d = 1', True), + ('*a.b,', True), + ('*a.b, = 1', True), + ('*a[b],', True), + ('*a[b], = 1', True), + ('*a[b::], c', True), + ('*a[b::], c = 1', True), + ('(a, *[b, c])', True), + ('(a, *[b, c]) = 1', True), + ('[a, *(b, [*c])]', True), + ('[a, *(b, [*c])] = 1', True), + ('[*(1,2,3)]', True), + ('{*(1,2,3)}', True), + ('[*(1,2,3),]', True), + ('[*(1,2,3), *(4,5,6)]', True), + ('[0, *(1,2,3)]', True), + ('{*(1,2,3),}', True), + ('{*(1,2,3), *(4,5,6)}', True), + ('{0, *(4,5,6)}', True) + ] +) +def test_starred_expr(source, no_errors): + assert bool(_get_error_list(source, version="3")) ^ no_errors + + +@pytest.mark.parametrize( + 'code', [ + 'a, (*b), c', + 'a, (*b), c = 1', + 'a, ((*b)), c', + 'a, ((*b)), c = 1', + ] +) +def test_parenthesized_single_starred_expr(code): + assert not _get_error_list(code, version='3.8') + assert _get_error_list(code, version='3.9') + + +@pytest.mark.parametrize( + 'code', [ + '() = ()', + '() = []', + '[] = ()', + '[] = []', + ] +) +def test_valid_empty_assignment(code): + assert not _get_error_list(code) + + +@pytest.mark.parametrize( + 'code', [ + 'del ()', + 'del []', + 'del x', + 'del x,', + 'del x, y', + 'del (x, y)', + 'del [x, y]', + 'del (x, [y, z])', + 'del x.y, x[y]', + 'del f(x)[y::]', + 'del x[[*y]]', + 'del x[[*y]::]', + ] +) +def test_valid_del(code): + assert not _get_error_list(code) + + +@pytest.mark.parametrize( + ('source', 'version', 'no_errors'), [ + ('[x for x in range(10) if lambda: 1]', '3.8', True), + ('[x for x in range(10) if lambda: 1]', '3.9', False), + ('[x for x in range(10) if (lambda: 1)]', '3.9', True), + ] +) +def test_lambda_in_comp_if(source, version, no_errors): + assert bool(_get_error_list(source, version=version)) ^ no_errors diff --git 
a/bundle/jedi-vim/pythonx/parso/test/test_tokenize.py b/bundle/jedi-vim/pythonx/parso/test/test_tokenize.py new file mode 100644 index 000000000..0029fc8a5 --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/test_tokenize.py @@ -0,0 +1,429 @@ +# -*- coding: utf-8 # This file contains Unicode characters. + +from textwrap import dedent + +import pytest + +from parso.utils import split_lines, parse_version_string +from parso.python.token import PythonTokenTypes +from parso.python import tokenize +from parso import parse +from parso.python.tokenize import PythonToken + + +# To make it easier to access some of the token types, just put them here. +NAME = PythonTokenTypes.NAME +NEWLINE = PythonTokenTypes.NEWLINE +STRING = PythonTokenTypes.STRING +NUMBER = PythonTokenTypes.NUMBER +INDENT = PythonTokenTypes.INDENT +DEDENT = PythonTokenTypes.DEDENT +ERRORTOKEN = PythonTokenTypes.ERRORTOKEN +OP = PythonTokenTypes.OP +ENDMARKER = PythonTokenTypes.ENDMARKER +ERROR_DEDENT = PythonTokenTypes.ERROR_DEDENT +FSTRING_START = PythonTokenTypes.FSTRING_START +FSTRING_STRING = PythonTokenTypes.FSTRING_STRING +FSTRING_END = PythonTokenTypes.FSTRING_END + + +def _get_token_list(string, version=None): + # Load the current version. 
+ version_info = parse_version_string(version) + return list(tokenize.tokenize(string, version_info=version_info)) + + +def test_end_pos_one_line(): + parsed = parse(dedent(''' + def testit(): + a = "huhu" + ''')) + simple_stmt = next(parsed.iter_funcdefs()).get_suite().children[-1] + string = simple_stmt.children[0].get_rhs() + assert string.end_pos == (3, 14) + + +def test_end_pos_multi_line(): + parsed = parse(dedent(''' + def testit(): + a = """huhu + asdfasdf""" + "h" + ''')) + expr_stmt = next(parsed.iter_funcdefs()).get_suite().children[1].children[0] + string_leaf = expr_stmt.get_rhs().children[0] + assert string_leaf.end_pos == (4, 11) + + +def test_simple_no_whitespace(): + # Test a simple one line string, no preceding whitespace + simple_docstring = '"""simple one line docstring"""' + token_list = _get_token_list(simple_docstring) + _, value, _, prefix = token_list[0] + assert prefix == '' + assert value == '"""simple one line docstring"""' + + +def test_simple_with_whitespace(): + # Test a simple one line string with preceding whitespace and newline + simple_docstring = ' """simple one line docstring""" \r\n' + token_list = _get_token_list(simple_docstring) + assert token_list[0][0] == INDENT + typ, value, start_pos, prefix = token_list[1] + assert prefix == ' ' + assert value == '"""simple one line docstring"""' + assert typ == STRING + typ, value, start_pos, prefix = token_list[2] + assert prefix == ' ' + assert typ == NEWLINE + + +def test_function_whitespace(): + # Test function definition whitespace identification + fundef = dedent(''' + def test_whitespace(*args, **kwargs): + x = 1 + if x > 0: + print(True) + ''') + token_list = _get_token_list(fundef) + for _, value, _, prefix in token_list: + if value == 'test_whitespace': + assert prefix == ' ' + if value == '(': + assert prefix == '' + if value == '*': + assert prefix == '' + if value == '**': + assert prefix == ' ' + if value == 'print': + assert prefix == ' ' + if value == 'if': + assert 
prefix == ' ' + + +def test_tokenize_multiline_I(): + # Make sure multiline string having newlines have the end marker on the + # next line + fundef = '''""""\n''' + token_list = _get_token_list(fundef) + assert token_list == [PythonToken(ERRORTOKEN, '""""\n', (1, 0), ''), + PythonToken(ENDMARKER, '', (2, 0), '')] + + +def test_tokenize_multiline_II(): + # Make sure multiline string having no newlines have the end marker on + # same line + fundef = '''""""''' + token_list = _get_token_list(fundef) + assert token_list == [PythonToken(ERRORTOKEN, '""""', (1, 0), ''), + PythonToken(ENDMARKER, '', (1, 4), '')] + + +def test_tokenize_multiline_III(): + # Make sure multiline string having newlines have the end marker on the + # next line even if several newline + fundef = '''""""\n\n''' + token_list = _get_token_list(fundef) + assert token_list == [PythonToken(ERRORTOKEN, '""""\n\n', (1, 0), ''), + PythonToken(ENDMARKER, '', (3, 0), '')] + + +def test_identifier_contains_unicode(): + fundef = dedent(''' + def 我あφ(): + pass + ''') + token_list = _get_token_list(fundef) + unicode_token = token_list[1] + assert unicode_token[0] == NAME + + +def test_quoted_strings(): + string_tokens = [ + 'u"test"', + 'u"""test"""', + 'U"""test"""', + "u'''test'''", + "U'''test'''", + ] + + for s in string_tokens: + module = parse('''a = %s\n''' % s) + simple_stmt = module.children[0] + expr_stmt = simple_stmt.children[0] + assert len(expr_stmt.children) == 3 + string_tok = expr_stmt.children[2] + assert string_tok.type == 'string' + assert string_tok.value == s + + +def test_ur_literals(): + """ + Decided to parse `u''` literals regardless of Python version. This makes + probably sense: + + - Python 3+ doesn't support it, but it doesn't hurt + not be. While this is incorrect, it's just incorrect for one "old" and in + the future not very important version. + - All the other Python versions work very well with it. 
+ """ + def check(literal, is_literal=True): + token_list = _get_token_list(literal) + typ, result_literal, _, _ = token_list[0] + if is_literal: + if typ != FSTRING_START: + assert typ == STRING + assert result_literal == literal + else: + assert typ == NAME + + check('u""') + check('ur""', is_literal=False) + check('Ur""', is_literal=False) + check('UR""', is_literal=False) + check('bR""') + check('Rb""') + + check('fr""') + check('rF""') + check('f""') + check('F""') + + +def test_error_literal(): + error_token, newline, endmarker = _get_token_list('"\n') + assert error_token.type == ERRORTOKEN + assert error_token.string == '"' + assert newline.type == NEWLINE + assert endmarker.type == ENDMARKER + assert endmarker.prefix == '' + + bracket, error_token, endmarker = _get_token_list('( """') + assert error_token.type == ERRORTOKEN + assert error_token.prefix == ' ' + assert error_token.string == '"""' + assert endmarker.type == ENDMARKER + assert endmarker.prefix == '' + + +def test_endmarker_end_pos(): + def check(code): + tokens = _get_token_list(code) + lines = split_lines(code) + assert tokens[-1].end_pos == (len(lines), len(lines[-1])) + + check('#c') + check('#c\n') + check('a\n') + check('a') + check(r'a\\n') + check('a\\') + + +@pytest.mark.parametrize( + ('code', 'types'), [ + # Indentation + (' foo', [INDENT, NAME, DEDENT]), + (' foo\n bar', [INDENT, NAME, NEWLINE, ERROR_DEDENT, NAME, DEDENT]), + (' foo\n bar \n baz', [INDENT, NAME, NEWLINE, ERROR_DEDENT, NAME, + NEWLINE, NAME, DEDENT]), + (' foo\nbar', [INDENT, NAME, NEWLINE, DEDENT, NAME]), + + # Name stuff + ('1foo1', [NUMBER, NAME]), + ('மெல்லினம்', [NAME]), + ('²', [ERRORTOKEN]), + ('ä²ö', [NAME, ERRORTOKEN, NAME]), + ('ää²¹öö', [NAME, ERRORTOKEN, NAME]), + (' \x00a', [INDENT, ERRORTOKEN, NAME, DEDENT]), + (dedent('''\ + class BaseCache: + a + def + b + def + c + '''), [NAME, NAME, OP, NEWLINE, INDENT, NAME, NEWLINE, + ERROR_DEDENT, NAME, NEWLINE, INDENT, NAME, NEWLINE, DEDENT, + NAME, NEWLINE, 
INDENT, NAME, NEWLINE, DEDENT, DEDENT]), + (' )\n foo', [INDENT, OP, NEWLINE, ERROR_DEDENT, NAME, DEDENT]), + ('a\n b\n )\n c', [NAME, NEWLINE, INDENT, NAME, NEWLINE, INDENT, OP, + NEWLINE, DEDENT, NAME, DEDENT]), + (' 1 \\\ndef', [INDENT, NUMBER, NAME, DEDENT]), + ] +) +def test_token_types(code, types): + actual_types = [t.type for t in _get_token_list(code)] + assert actual_types == types + [ENDMARKER] + + +def test_error_string(): + indent, t1, newline, token, endmarker = _get_token_list(' "\n') + assert t1.type == ERRORTOKEN + assert t1.prefix == ' ' + assert t1.string == '"' + assert newline.type == NEWLINE + assert endmarker.prefix == '' + assert endmarker.string == '' + + +def test_indent_error_recovery(): + code = dedent("""\ + str( + from x import a + def + """) + lst = _get_token_list(code) + expected = [ + # `str(` + INDENT, NAME, OP, + # `from parso` + NAME, NAME, + # `import a` on same line as the previous from parso + NAME, NAME, NEWLINE, + # Dedent happens, because there's an import now and the import + # statement "breaks" out of the opening paren on the first line. + DEDENT, + # `b` + NAME, NEWLINE, ENDMARKER] + assert [t.type for t in lst] == expected + + +def test_error_token_after_dedent(): + code = dedent("""\ + class C: + pass + $foo + """) + lst = _get_token_list(code) + expected = [ + NAME, NAME, OP, NEWLINE, INDENT, NAME, NEWLINE, DEDENT, + # $foo\n + ERRORTOKEN, NAME, NEWLINE, ENDMARKER + ] + assert [t.type for t in lst] == expected + + +def test_brackets_no_indentation(): + """ + There used to be an issue that the parentheses counting would go below + zero. This should not happen. 
+ """ + code = dedent("""\ + } + { + } + """) + lst = _get_token_list(code) + assert [t.type for t in lst] == [OP, NEWLINE, OP, OP, NEWLINE, ENDMARKER] + + +def test_form_feed(): + indent, error_token, dedent_, endmarker = _get_token_list(dedent('''\ + \f"""''')) + assert error_token.prefix == '\f' + assert error_token.string == '"""' + assert endmarker.prefix == '' + assert indent.type == INDENT + assert dedent_.type == DEDENT + + +def test_carriage_return(): + lst = _get_token_list(' =\\\rclass') + assert [t.type for t in lst] == [INDENT, OP, NAME, DEDENT, ENDMARKER] + + +def test_backslash(): + code = '\\\n# 1 \n' + endmarker, = _get_token_list(code) + assert endmarker.prefix == code + + +@pytest.mark.parametrize( + ('code', 'types'), [ + # f-strings + ('f"', [FSTRING_START]), + ('f""', [FSTRING_START, FSTRING_END]), + ('f" {}"', [FSTRING_START, FSTRING_STRING, OP, OP, FSTRING_END]), + ('f" "{}', [FSTRING_START, FSTRING_STRING, FSTRING_END, OP, OP]), + (r'f"\""', [FSTRING_START, FSTRING_STRING, FSTRING_END]), + (r'f"\""', [FSTRING_START, FSTRING_STRING, FSTRING_END]), + + # format spec + (r'f"Some {x:.2f}{y}"', [FSTRING_START, FSTRING_STRING, OP, NAME, OP, + FSTRING_STRING, OP, OP, NAME, OP, FSTRING_END]), + + # multiline f-string + ('f"""abc\ndef"""', [FSTRING_START, FSTRING_STRING, FSTRING_END]), + ('f"""abc{\n123}def"""', [ + FSTRING_START, FSTRING_STRING, OP, NUMBER, OP, FSTRING_STRING, + FSTRING_END + ]), + + # a line continuation inside of an fstring_string + ('f"abc\\\ndef"', [ + FSTRING_START, FSTRING_STRING, FSTRING_END + ]), + ('f"\\\n{123}\\\n"', [ + FSTRING_START, FSTRING_STRING, OP, NUMBER, OP, FSTRING_STRING, + FSTRING_END + ]), + + # a line continuation inside of an fstring_expr + ('f"{\\\n123}"', [FSTRING_START, OP, NUMBER, OP, FSTRING_END]), + + # a line continuation inside of an format spec + ('f"{123:.2\\\nf}"', [ + FSTRING_START, OP, NUMBER, OP, FSTRING_STRING, OP, FSTRING_END + ]), + + # a newline without a line continuation inside a 
single-line string is + # wrong, and will generate an ERRORTOKEN + ('f"abc\ndef"', [ + FSTRING_START, FSTRING_STRING, NEWLINE, NAME, ERRORTOKEN + ]), + + # a more complex example + (r'print(f"Some {x:.2f}a{y}")', [ + NAME, OP, FSTRING_START, FSTRING_STRING, OP, NAME, OP, + FSTRING_STRING, OP, FSTRING_STRING, OP, NAME, OP, FSTRING_END, OP + ]), + # issue #86, a string-like in an f-string expression + ('f"{ ""}"', [ + FSTRING_START, OP, FSTRING_END, STRING + ]), + ('f"{ f""}"', [ + FSTRING_START, OP, NAME, FSTRING_END, STRING + ]), + ] +) +def test_fstring_token_types(code, types, each_version): + actual_types = [t.type for t in _get_token_list(code, each_version)] + assert types + [ENDMARKER] == actual_types + + +@pytest.mark.parametrize( + ('code', 'types'), [ + # issue #87, `:=` in the outest paratheses should be tokenized + # as a format spec marker and part of the format + ('f"{x:=10}"', [ + FSTRING_START, OP, NAME, OP, FSTRING_STRING, OP, FSTRING_END + ]), + ('f"{(x:=10)}"', [ + FSTRING_START, OP, OP, NAME, OP, NUMBER, OP, OP, FSTRING_END + ]), + ] +) +def test_fstring_assignment_expression(code, types, version_ge_py38): + actual_types = [t.type for t in _get_token_list(code, version_ge_py38)] + assert types + [ENDMARKER] == actual_types + + +def test_fstring_end_error_pos(version_ge_py38): + f_start, f_string, bracket, f_end, endmarker = \ + _get_token_list('f" { "', version_ge_py38) + assert f_start.start_pos == (1, 0) + assert f_string.start_pos == (1, 2) + assert bracket.start_pos == (1, 3) + assert f_end.start_pos == (1, 5) + assert endmarker.start_pos == (1, 6) diff --git a/bundle/jedi-vim/pythonx/parso/test/test_utils.py b/bundle/jedi-vim/pythonx/parso/test/test_utils.py new file mode 100644 index 000000000..300a54ebc --- /dev/null +++ b/bundle/jedi-vim/pythonx/parso/test/test_utils.py @@ -0,0 +1,107 @@ +from codecs import BOM_UTF8 + +from parso.utils import ( + split_lines, + parse_version_string, + python_bytes_to_unicode, +) + +import parso + +import 
pytest + + +@pytest.mark.parametrize( + ('string', 'expected_result', 'keepends'), [ + ('asd\r\n', ['asd', ''], False), + ('asd\r\n', ['asd\r\n', ''], True), + ('asd\r', ['asd', ''], False), + ('asd\r', ['asd\r', ''], True), + ('asd\n', ['asd', ''], False), + ('asd\n', ['asd\n', ''], True), + + ('asd\r\n\f', ['asd', '\f'], False), + ('asd\r\n\f', ['asd\r\n', '\f'], True), + + ('\fasd\r\n', ['\fasd', ''], False), + ('\fasd\r\n', ['\fasd\r\n', ''], True), + + ('', [''], False), + ('', [''], True), + + ('\n', ['', ''], False), + ('\n', ['\n', ''], True), + + ('\r', ['', ''], False), + ('\r', ['\r', ''], True), + + # Invalid line breaks + ('a\vb', ['a\vb'], False), + ('a\vb', ['a\vb'], True), + ('\x1C', ['\x1C'], False), + ('\x1C', ['\x1C'], True), + ] +) +def test_split_lines(string, expected_result, keepends): + assert split_lines(string, keepends=keepends) == expected_result + + +def test_python_bytes_to_unicode_unicode_text(): + source = ( + b"# vim: fileencoding=utf-8\n" + b"# \xe3\x81\x82\xe3\x81\x84\xe3\x81\x86\xe3\x81\x88\xe3\x81\x8a\n" + ) + actual = python_bytes_to_unicode(source) + expected = source.decode('utf-8') + assert actual == expected + + +def test_utf8_bom(): + unicode_bom = BOM_UTF8.decode('utf-8') + + module = parso.parse(unicode_bom) + endmarker = module.children[0] + assert endmarker.type == 'endmarker' + assert unicode_bom == endmarker.prefix + + module = parso.parse(unicode_bom + 'foo = 1') + expr_stmt = module.children[0] + assert expr_stmt.type == 'expr_stmt' + assert unicode_bom == expr_stmt.get_first_leaf().prefix + + +@pytest.mark.parametrize( + ('code', 'errors'), [ + (b'# coding: wtf-12\nfoo', 'strict'), + (b'# coding: wtf-12\nfoo', 'replace'), + (b'# coding: wtf-12\r\nfoo', 'strict'), + (b'# coding: wtf-12\r\nfoo', 'replace'), + (b'# coding: wtf-12\rfoo', 'strict'), + (b'# coding: wtf-12\rfoo', 'replace'), + ] +) +def test_bytes_to_unicode_failing_encoding(code, errors): + if errors == 'strict': + with pytest.raises(LookupError): + 
python_bytes_to_unicode(code, errors=errors) + else: + python_bytes_to_unicode(code, errors=errors) + + +@pytest.mark.parametrize( + ('version_str', 'version'), [ + ('3', (3,)), + ('3.6', (3, 6)), + ('3.6.10', (3, 6)), + ('3.10', (3, 10)), + ('3.10a9', (3, 10)), + ('3.10b9', (3, 10)), + ('3.10rc9', (3, 10)), + ] +) +def test_parse_version_string(version_str, version): + parsed_version = parse_version_string(version_str) + if len(version) == 1: + assert parsed_version[0] == version[0] + else: + assert parsed_version == version diff --git a/bundle/jedi-vim/setup.cfg b/bundle/jedi-vim/setup.cfg new file mode 100644 index 000000000..93e190e59 --- /dev/null +++ b/bundle/jedi-vim/setup.cfg @@ -0,0 +1,5 @@ +[tool:pytest] +testpaths = test + +[flake8] +max-line-length = 100 diff --git a/bundle/jedi-vim/test/_utils.vim b/bundle/jedi-vim/test/_utils.vim new file mode 100644 index 000000000..5ad2a1d45 --- /dev/null +++ b/bundle/jedi-vim/test/_utils.vim @@ -0,0 +1,11 @@ +function! CurrentBufferIsModule(module_name) + return EndsWith(bufname('%'), a:module_name.'.py') +endfunction + + +function EndsWith(string, end) + let l:should = len(a:string) - strlen(a:end) + return l:should == stridx(a:string, a:end, should) +endfunction + +" vim: et:ts=4:sw=4 diff --git a/bundle/jedi-vim/test/test_integration.py b/bundle/jedi-vim/test/test_integration.py new file mode 100644 index 000000000..10a7b1729 --- /dev/null +++ b/bundle/jedi-vim/test/test_integration.py @@ -0,0 +1,65 @@ +"""Runs tests from ./vspec in vim-vspec.""" +import os +import subprocess +try: + from urllib.request import urlretrieve +except ImportError: + from urllib import urlretrieve +import zipfile + +import pytest + +vspec_version = '1.9.0' + +VSPEC_URL = 'https://github.com/kana/vim-vspec/archive/%s.zip' % vspec_version +root = os.path.dirname(os.path.dirname(__file__)) +CACHE_FOLDER = os.path.join(root, 'build') +VSPEC_FOLDER = os.path.join(CACHE_FOLDER, 'vim-vspec-%s' % vspec_version) +VSPEC_RUNNER = 
os.path.join(VSPEC_FOLDER, 'bin/vspec') +TEST_DIR = os.path.join(root, 'test', 'vspec') + + +@pytest.fixture(scope='session') +def install_vspec(): + if not os.path.isdir(CACHE_FOLDER): + os.mkdir(CACHE_FOLDER) + + if not os.path.exists(VSPEC_FOLDER): + name, hdrs = urlretrieve(VSPEC_URL) + z = zipfile.ZipFile(name) + for n in z.namelist(): + dest = os.path.join(CACHE_FOLDER, n) + destdir = os.path.dirname(dest) + if not os.path.isdir(destdir): + os.makedirs(destdir) + data = z.read(n) + if not os.path.isdir(dest): + with open(dest, 'wb') as f: + f.write(data) + z.close() + os.chmod(VSPEC_RUNNER, 0o777) + + +def get_vspec_tests(): + for f in os.listdir(TEST_DIR): + yield os.path.relpath(os.path.join(TEST_DIR, f)) + + +@pytest.mark.parametrize('path', get_vspec_tests()) +def test_integration(install_vspec, path): + output = subprocess.check_output( + [VSPEC_RUNNER, '.', VSPEC_FOLDER, os.path.relpath(path, root)], + cwd=root, + ) + had_ok = False + for line in output.splitlines(): + if (line.startswith(b'not ok') or + line.startswith(b'Error') or + line.startswith(b'Bail out!')): + pytest.fail(u"{0} failed:\n{1}".format( + path, output.decode('utf-8')), pytrace=False) + if not had_ok and line.startswith(b'ok'): + had_ok = True + if not had_ok: + pytest.fail(u"{0} failed: no 'ok' found:\n{1}".format( + path, output.decode('utf-8')), pytrace=False) diff --git a/bundle/jedi-vim/test/vimrc b/bundle/jedi-vim/test/vimrc new file mode 100644 index 000000000..82111f3df --- /dev/null +++ b/bundle/jedi-vim/test/vimrc @@ -0,0 +1,8 @@ +" Minimal vimrc to use jedi-vim. +" +" Not used anywhere yet, but allows for easy testing. 
+let script_dir = fnamemodify(expand(''), ':h:h') +let &runtimepath = script_dir.','.&runtimepath.','.script_dir.'/after' + +syntax on +filetype plugin indent on diff --git a/bundle/jedi-vim/test/vspec/choose-venv.vim b/bundle/jedi-vim/test/vspec/choose-venv.vim new file mode 100644 index 000000000..d93ea075a --- /dev/null +++ b/bundle/jedi-vim/test/vspec/choose-venv.vim @@ -0,0 +1,29 @@ +source plugin/jedi.vim +source test/_utils.vim + +describe 'simple:' + before + new + normal! ifoo + end + + after + bd! + end + + it 'choose' + Expect g:jedi#environment_path == 'auto' + Expect bufname('%') == '' + + JediChooseEnvironment + " A Python executable needs to be a few letters + Expect len(getline('.')) > 5 + Expect bufname('%') == 'Hit Enter to Choose an Environment' + + execute "normal \" + Expect g:jedi#environment_path != 'auto' + bd " TODO why is this necessary? There seems to be a random buffer. + Expect bufname('%') == '' + Expect getline('.') == 'foo' + end +end diff --git a/bundle/jedi-vim/test/vspec/completions.vim b/bundle/jedi-vim/test/vspec/completions.vim new file mode 100644 index 000000000..068e5392e --- /dev/null +++ b/bundle/jedi-vim/test/vspec/completions.vim @@ -0,0 +1,131 @@ +let g:jedi#completions_command = 'X' +source plugin/jedi.vim + +describe 'completions' + before + new + set filetype=python + end + + after + " default + let g:jedi#popup_select_first = 1 + bd! + end + + it 'longest in completeopt' + " This gets set up with Vim only on VimEnter. + if has('nvim') + Expect stridx(&completeopt, 'longest') > -1 + else + Expect stridx(&completeopt, 'longest') == -1 + doautocmd VimEnter + Expect stridx(&completeopt, 'longest') > -1 + endif + + " Do not use it for following tests. 
+ set completeopt-=longest + end + + it 'no smart import by default' + exec "normal ifrom os " + Expect getline('.') == 'from os ' + end + + it 'import' + " X is the completion command + normal oimporX + Expect getline('.') == 'import' + normal a subproX + Expect getline('.') == 'import subprocess' + end + + it 'exception' + normal oIndentationErrX + Expect getline('.') == 'IndentationError' + + " Do not remap keys (".") here, otherwise this triggers completion in + " Neovim already. + normal! a().filena + + normal aX + Expect getline('.') == 'IndentationError().filename' + end + + it 'multi complete' + " NOTE: nvim results in "importErr()" here with completeopt+=longest, + " but Vim is fine. + " This is due to `pumvisible()` in jedi#complete_opened being true + " with nvim still, but it is 0 with Vim, i.e. Vim appears to close + " the pum already (with the tests). + " + " This might be a misunderstanding though, since the test might not + " expect the "import" keyword to be offered for completion?! + normal oImpXErrX() + Expect getline('.') == 'ImportError()' + end + + it 'cycling through entries popup_select_first=0' + set completeopt+=longest + let g:jedi#popup_select_first = 0 + execute "normal oraise impX\" + + Expect getline('.') == 'raise ImportError' + set completeopt-=longest + end + + it 'cycling through entries popup_select_first=1' + execute "normal oraise impX\" + Expect getline('.') == 'raise ImportWarning' + end + + it 'cycling through entries popup_select_first=1 and longest' + set completeopt+=longest + execute "normal oraise impX" + Expect getline('.') == 'raise Import' + + " With Neovim pumvisible() is 1 in jedi#complete_opened, which then + " triggers the . This is not the case with Vim. 
+ if has('nvim') + execute "normal oraise impX\" + Expect getline('.') == 'raise ImportWarning' + + execute "normal oraise impX\\" + Expect getline('.') == 'raise imp' + else + execute "normal oraise impX\" + Expect getline('.') == 'raise ImportError' + + execute "normal oraise impX\\" + Expect getline('.') == 'raise ImportWarning' + endif + set completeopt-=longest + end +end + +describe 'smart completions' + before + new + let g:jedi#smart_auto_mappings = 1 + set filetype=python + end + + after + " default + let g:jedi#smart_auto_mappings = 0 + bd! + end + + it 'smart import' + exec "normal ifrom os " + Expect getline('.') == 'from os import ' + end + + it 'no smart import after space' + exec "normal! ifrom os " + exec "normal a " + Expect getline('.') == 'from os ' + end +end + +" vim: et:ts=4:sw=4 diff --git a/bundle/jedi-vim/test/vspec/completions_disabled.vim b/bundle/jedi-vim/test/vspec/completions_disabled.vim new file mode 100644 index 000000000..03d17c5f9 --- /dev/null +++ b/bundle/jedi-vim/test/vspec/completions_disabled.vim @@ -0,0 +1,20 @@ +let g:jedi#completions_command = 'X' +let g:jedi#completions_enabled = 0 +source plugin/jedi.vim + +describe 'completions_disabled' + before + set filetype=python + end + + after + try | %bwipeout! | catch | endtry + end + + it 'typing' + normal oraise ImportErrX + Expect getline('.') == 'raise ImportErrX' + end +end + +" vim: et:ts=4:sw=4 diff --git a/bundle/jedi-vim/test/vspec/documentation.vim b/bundle/jedi-vim/test/vspec/documentation.vim new file mode 100644 index 000000000..c97c1a891 --- /dev/null +++ b/bundle/jedi-vim/test/vspec/documentation.vim @@ -0,0 +1,34 @@ +source plugin/jedi.vim + +describe 'documentation docstrings' + before + set filetype=python + end + + after + try | %bwipeout! 
| catch | endtry + end + + it 'simple' + Expect maparg('K') == ':call jedi#show_documentation()' + put = 'ImportError' + normal GK + Expect bufname('%') == "__doc__" + Expect &filetype == 'rst' + let header = getline(1, 2) + Expect header[0] == "Docstring for class builtins.ImportError" + Expect header[1] == "========================================" + let content = join(getline(3, '$'), "\n") + Expect stridx(content, "Import can't find module") > 0 + normal K + Expect bufname('%') == '' + end + + it 'no documentation' + put = 'x = 2' + normal oGK + Expect bufname('%') == '' + end +end + +" vim: et:ts=4:sw=4 diff --git a/bundle/jedi-vim/test/vspec/goto.vim b/bundle/jedi-vim/test/vspec/goto.vim new file mode 100644 index 000000000..f5116f228 --- /dev/null +++ b/bundle/jedi-vim/test/vspec/goto.vim @@ -0,0 +1,177 @@ +let mapleader = '\' +source plugin/jedi.vim +source test/_utils.vim + +describe 'goto simple:' + before + new + set filetype=python + put =[ + \ 'def a(): pass', + \ 'b = a', + \ 'c = b', + \ ] + normal! ggdd + normal! G$ + Expect line('.') == 3 + end + + after + bd! + end + + it 'goto definitions' + normal \d + Expect line('.') == 2 + Expect col('.') == 1 + end + + it 'goto assignments' + normal \g + Expect line('.') == 2 + Expect col('.') == 1 + + " cursor before `=` means that it stays there. + normal \g + Expect line('.') == 2 + Expect col('.') == 1 + + " going to the last line changes it. + normal! $ + normal \g + Expect line('.') == 1 + Expect col('.') == 5 + end +end + + +describe 'goto with tabs:' + before + set filetype=python + let g:jedi#use_tabs_not_buffers = 1 + end + + after + try | %bwipeout! 
| catch | endtry + end + + it 'follow import' + put = ['import subprocess', 'subprocess'] + normal G\g + Expect getline('.') == 'import subprocess' + Expect line('.') == 2 + Expect col('.') == 8 + + normal G\d + Expect CurrentBufferIsModule('subprocess') == 1 + Expect line('.') == 1 + Expect col('.') == 1 + Expect tabpagenr('$') == 2 + Expect winnr('$') == 1 + bwipe + + Expect tabpagenr('$') == 1 + Expect bufname('%') == '' + end +end + + +describe 'goto with buffers' + before + set filetype=python + let g:jedi#use_tabs_not_buffers = 0 + end + + after + try | %bwipeout! | catch | endtry + set nohidden + end + + it 'no new tabs' + put = ['import os'] + normal G$ + call jedi#goto_assignments() + python3 jedi_vim.goto() + Expect CurrentBufferIsModule('os') == 0 + " Without hidden, it's not possible to open a new buffer, when the old + " one is not saved. + set hidden + call jedi#goto_assignments() + Expect CurrentBufferIsModule('os') == 1 + Expect winnr('$') == 1 + Expect tabpagenr('$') == 1 + Expect line('.') == 1 + Expect col('.') == 1 + end +end + + + +describe 'goto with splits' + before + enew! + set filetype=python + let g:jedi#use_splits_not_buffers = 'left' + end + + after + try | %bwipeout! | catch | endtry + end + + it 'follow import' + put = ['import subprocess', 'subprocess'] + normal G\g + Expect getline('.') == 'import subprocess' + Expect line('.') == 2 + Expect col('.') == 8 + + normal G\d + Expect CurrentBufferIsModule('subprocess') == 1 + Expect line('.') == 1 + Expect col('.') == 1 + Expect winnr('$') == 2 + wincmd l + Expect bufname('%') == '' + end + +end + + +describe 'goto wildignore' + before + enew! 
+ set filetype=python + set wildignore=*,with\ spaces,*.pyc + set hidden + let g:jedi#use_tag_stack = 1 + let g:jedi#use_tabs_not_buffers = 0 + " Need to use splits for code coverage in new_buffer() + let g:jedi#use_splits_not_buffers = 1 + + put = ['from subprocess import Popen', 'Popen'] + Expect CurrentBufferIsModule('subprocess') == 0 + normal G + end + + after + try | %bwipeout! | catch | endtry + set wildignore&vim + end + + it 'restores wildignore' + let before = &wildignore + call jedi#goto() + Expect getline('.') =~ 'Popen' + Expect &wildignore == before + end + + it 'not using tagstack' + let g:jedi#use_tag_stack = 0 + call jedi#goto() + Expect CurrentBufferIsModule('subprocess') == 1 + Expect getline('.') =~ 'Popen' + end +end + + +" vim: et:ts=4:sw=4 diff --git a/bundle/jedi-vim/test/vspec/jedi_debug_info.vim b/bundle/jedi-vim/test/vspec/jedi_debug_info.vim new file mode 100644 index 000000000..a7b9b014f --- /dev/null +++ b/bundle/jedi-vim/test/vspec/jedi_debug_info.vim @@ -0,0 +1,13 @@ +source plugin/jedi.vim + +describe 'JediDebugInfo' + it 'works' + redir @a | JediDebugInfo | redir END + let output = split(@a, '\n') + Expect output[0] == 'You should run this in a buffer with filetype "python".' + Expect output[1] == '#### Jedi-vim debug information' + Expect output[-1] == '' + end +end + +" vim: et:ts=4:sw=4 diff --git a/bundle/jedi-vim/test/vspec/pyimport.vim b/bundle/jedi-vim/test/vspec/pyimport.vim new file mode 100644 index 000000000..fb4bc52f3 --- /dev/null +++ b/bundle/jedi-vim/test/vspec/pyimport.vim @@ -0,0 +1,34 @@ +source plugin/jedi.vim +source test/_utils.vim + +describe 'pyimport' + before + let g:jedi#use_tabs_not_buffers = 1 + let g:jedi#project_path = 'autoload' + end + + after + try | %bwipeout! 
| catch | endtry + unlet g:jedi#project_path + end + + it 'open_tab' + Pyimport os + Expect CurrentBufferIsModule('os') == 1 + Pyimport subprocess + Expect CurrentBufferIsModule('subprocess') == 1 + " the empty tab is sometimes also a tab + Expect tabpagenr('$') >= 2 + end + + it 'completion' + " don't know how to test this directly + "execute "Pyimport subproc\" + "Expect CurrentBufferIsModule('subprocess') == 1 + + Expect jedi#py_import_completions('subproc', 0, 0) == 'subprocess' + Expect jedi#py_import_completions('subprocess', 0, 0) == 'subprocess' + let g:comp = jedi#py_import_completions('sre_', 0, 0) + Expect g:comp == "sre_compile\nsre_constants\nsre_parse" + end +end diff --git a/bundle/jedi-vim/test/vspec/signatures.vim b/bundle/jedi-vim/test/vspec/signatures.vim new file mode 100644 index 000000000..8ac0ed958 --- /dev/null +++ b/bundle/jedi-vim/test/vspec/signatures.vim @@ -0,0 +1,143 @@ +source plugin/jedi.vim + +describe 'signatures' + before + enew + set filetype=python + end + + after + try | %bwipeout! | catch | endtry + end + + it 'simple' + normal odef xyz(number): return + normal o + normal oxyz() + doautocmd CursorHoldI + Expect getline(3) == '?!?jedi=0, ?!? (*_*number*_*) ?!?jedi?!?' + + doautocmd InsertLeave + Expect getline(3) == '' + end + + it 'multiple buffers' + set hidden + new + setfiletype python + redir => autocmds + autocmd jedi_call_signatures * + redir END + Expect autocmds =~# 'jedi_call_signatures' + buffer # + redir => autocmds + autocmd jedi_call_signatures * + redir END + Expect autocmds =~# 'jedi_call_signatures' + end + + it 'simple after CursorHoldI with only parenthesis' + noautocmd normal o + doautocmd CursorHoldI + noautocmd normal istaticmethod() + doautocmd CursorHoldI + Expect getline(1) == '?!?jedi=0, ?!? (*_*f: Callable[..., Any]*_*) ?!?jedi?!?' + end + + it 'highlights correct argument' + noautocmd normal o + doautocmd CursorHoldI + noautocmd normal iformat(42, "x") + " Move to x - highlights "x". 
+ noautocmd normal 2h + doautocmd CursorHoldI + Expect getline(1) == '?!?jedi=0, ?!? (value: object, *_*format_spec: str=...*_*) ?!?jedi?!?' + " Move left to 42 - hightlights first argument ("value"). + noautocmd normal 4h + doautocmd CursorHoldI + Expect getline(1) == '?!?jedi=0, ?!? (*_*value: object*_*, format_spec: str=...) ?!?jedi?!?' + end + + it 'no signature' + exe 'normal ostr ' + python3 jedi_vim.show_call_signatures() + Expect getline(1, '$') == ['', 'str '] + end + + it 'signatures disabled' + let g:jedi#show_call_signatures = 0 + + exe 'normal ostr( ' + python3 jedi_vim.show_call_signatures() + Expect getline(1, '$') == ['', 'str( '] + + let g:jedi#show_call_signatures = 1 + end + + it 'command line simple' + let g:jedi#show_call_signatures = 2 + call jedi#configure_call_signatures() + + exe 'normal ostaticmethod( ' + redir => msg + python3 jedi_vim.show_call_signatures() + redir END + Expect msg == "\nstaticmethod(f: Callable[..., Any])" + + redir => msg + doautocmd InsertLeave + redir END + Expect msg == "\n" + + normal Sdef foo(a, b): pass + exe 'normal ofoo(a, b, c, ' + redir => msg + python3 jedi_vim.show_call_signatures() + redir END + Expect msg == "\nfoo(a, b)" + end + + it 'command line truncation' + let g:jedi#show_call_signatures = 2 + call jedi#configure_call_signatures() + + function! Signature() + redir => msg + python3 jedi_vim.show_call_signatures() + redir END + return msg + endfunction + + let funcname = repeat('a', &columns - (30 + (&ruler ? 
18 : 0))) + put = 'def '.funcname.'(arg1, arg2, arg3, a, b, c):' + put = ' pass' + execute "normal o\".funcname."( " + Expect Signature() == "\n".funcname."(arg1, …)" + + exe 'normal sarg1, ' + Expect Signature() == "\n".funcname."(…, arg2, …)" + + exe 'normal sarg2, arg3, ' + Expect Signature() == "\n".funcname."(…, a, b, c)" + + exe 'normal sa, b, ' + Expect Signature() == "\n".funcname."(…, c)" + + g/^/d + put = 'def '.funcname.'('.repeat('b', 20).', arg2):' + put = ' pass' + execute "normal o\".funcname."( " + Expect Signature() == "\n".funcname."(…)" + end + + it 'command line no signature' + let g:jedi#show_call_signatures = 2 + call jedi#configure_call_signatures() + + exe 'normal ostr ' + redir => msg + python3 jedi_vim.show_call_signatures() + redir END + Expect msg == "\n" + end +end