
chore(jedi): use bundle jedi-vim

This commit is contained in:
wsdjeg 2022-10-23 15:41:52 +08:00
parent b2af9f5479
commit a3bd1d31c7
528 changed files with 70565 additions and 1 deletion

autoload/SpaceVim/layers/lang/python.vim

@@ -106,7 +106,7 @@ function! SpaceVim#layers#lang#python#plugins() abort
" but we need to disable the completions of jedi-vim.
let g:jedi#completions_enabled = 0
endif
-  call add(plugins, ['davidhalter/jedi-vim', { 'on_ft' : 'python',
+  call add(plugins, [g:_spacevim_root_dir . 'bundle/jedi-vim', { 'on_ft' : 'python',
\ 'if' : has('python') || has('python3')}])
endif
call add(plugins, ['heavenshell/vim-pydocstring',

bundle/jedi-vim/.github/FUNDING.yml vendored Normal file
@@ -0,0 +1 @@
github: [davidhalter]

bundle/jedi-vim/.github/ISSUE_TEMPLATE.md vendored Normal file
@@ -0,0 +1,44 @@
### Issue
<!--
Please describe the issue here.
If you are not using jedi-vim from Git (but e.g. from a distribution's
package), please try to reproduce it with jedi-vim's Git master, too.
-->
### Steps to reproduce
<!--
Include if relevant.
Please provide steps to reproduce it here, preferably based on a minimal Vim
configuration.
You can use the following template (save it as `minimal.vimrc` in the directory
where jedi-vim is installed, `cd` into that directory, and run Vim with
`vim -u minimal.vimrc`):
```
set nocompatible
let script_dir = fnamemodify(expand('<sfile>'), ':h')
let &runtimepath .= ','.script_dir.','.script_dir.'/after'
" Put your config changes here.
" let g:jedi#show_call_signatures=1
syntax on
filetype plugin indent on
```
Please provide the `minimal.vimrc` you have used here, too.
-->
### Output of “:verbose JediDebugInfo”
<!--
Please execute `:redir @+> | silent verb JediDebugInfo | redir END` in a
Python buffer to copy debug information into your clipboard.
Then paste it here.
-->

bundle/jedi-vim/.github/workflows/ci.yml vendored Normal file
@@ -0,0 +1,63 @@
name: ci
on: [push, pull_request]
jobs:
tests:
runs-on: ubuntu-20.04
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
submodules: recursive
- name: Setup
run: |
sudo pip install pytest
vim --version
#- name: Setup tmate session
# uses: mxschmitt/action-tmate@v3
- name: Run tests
run: 'make test'
code-quality:
runs-on: ubuntu-20.04
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
submodules: recursive
- name: Run tests
run: |
vim --version
make check
coverage:
runs-on: ubuntu-20.04
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
submodules: recursive
- name: Install dependencies
run: |
sudo add-apt-repository ppa:neovim-ppa/stable -y
sudo apt-get update -q
sudo apt-get install neovim -y
sudo pip install pynvim pytest-cov
sudo pip list
nvim --version
- name: Run tests
run: |
make --keep-going test_coverage BUILD_VIRTUAL_ENV=$VIRTUAL_ENV
- name: Upload coverage data
run: |
coverage xml
bash <(curl -s https://codecov.io/bash) -X fix -f coverage.xml -F py${TRAVIS_PYTHON_VERSION//./}

bundle/jedi-vim/.gitignore vendored Normal file
@@ -0,0 +1,8 @@
*~
*.sw?
*.py[cod]
.ropeproject
doc/tags
.pytest-cache
build
.coverage*

bundle/jedi-vim/.gitmodules vendored Normal file
@@ -0,0 +1,6 @@
[submodule "jedi"]
path = pythonx/jedi
url = https://github.com/davidhalter/jedi.git
[submodule "pythonx/parso"]
path = pythonx/parso
url = https://github.com/davidhalter/parso.git

bundle/jedi-vim/.travis.yml vendored Normal file
@@ -0,0 +1,31 @@
dist: bionic
language: python
python: 3.8
env:
- ENV=test
- ENV=check
- ENV=test_coverage
install:
- |
if [ "$ENV" = "test" ]; then
pip install pytest
elif [ "$ENV" = "test_coverage" ]; then
sudo add-apt-repository ppa:neovim-ppa/stable -y
sudo apt-get update -q
sudo apt-get install neovim -y
pip install pynvim pytest-cov
pip list
nvim --version
else
vim --version
fi
script:
- make --keep-going "$ENV" BUILD_VIRTUAL_ENV=$VIRTUAL_ENV
after_script:
- |
if [ "$ENV" = "test_coverage" ]; then
coverage xml
travis_retry bash <(curl -s https://codecov.io/bash) -X fix -f coverage.xml -F py${TRAVIS_PYTHON_VERSION//./}
fi

bundle/jedi-vim/AUTHORS.txt vendored Normal file
@@ -0,0 +1,61 @@
Main Authors
============
David Halter (@davidhalter) <davidhalter88@gmail.com>
Contributors (in order of contributions)
========================================
Patrice Peterson (@runiq)
tek (@tek)
heavenshell (@heavenshell) <heavenshell.jp@gmail.com>
Danilo Bargen (@dbrgn) <gezuru@gmail.com>
mattn (@mattn) <mattn.jp@gmail.com>
Enrico Batista da Luz (@ricobl) <rico.bl@gmail.com>
coot (@coot) <mszamot@gmail.com>
Artur Dryomov (@ming13) <artur.dryomov@gmail.com>
andviro (@andviro)
Jean-Louis Fuchs (@ganwell) <ganwell@fangorn.ch>
Mathieu Comandon (@strycore) <strider@strycore.com>
Nick Hurley (@todesschaf) <hurley@todesschaf.org>
gpoulin (@gpoulin)
Akinori Hattori (@hattya)
Luper Rouch (@flupke)
Matthew Moses (@mlmoses) <moses.matthewl@gmail.com>
Tyler Wymer (@twymer)
Artem Nezvigin (@artnez)
rogererens (@rogererens)
Emily Strickland (@emilyst) <mail@emily.st>
Tin Tvrtković (@Tinche) <tinchester@gmail.com>
Zekeriya Koc (@zekzekus) <zekzekus@gmail.com>
ethinx (@ethinx) <eth2net@gmail.com>
Wouter Overmeire (@lodagro) <lodagro@gmail.com>
Stephen J. Fuhry (@fuhrysteve) <fuhrysteve@gmail.com>
Sheng Yun (@ShengYun) <uewing@gmail.com>
Yann Thomas-Gérard (@inside) <inside@gmail.com>
Colin Su (@littleq0903) <littleq0903@gmail.com>
Arthur Jaron (@eyetracker)
Justin M. Keyes (@justinmk)
nagev (@np1)
Chris Lasher (@gotgenes) <chris.lasher@gmail.com>
Doan Thanh Nam (@tndoan)
Markus Koller (@toupeira)
Justin Cheevers @justincheevers
Talha Ahmed (@talha81) <talha.ahmed@gmail.com>
Matthew Tylee Atkinson (@matatk)
Pedro Ferrari (@petobens)
Daniel Hahler (@blueyed)
Dave Honneffer (@pearofducks)
Bagrat Aznauryan (@n9code)
Tomoyuki Kashiro (@kashiro)
Tommy Allen (@tweekmonster)
Mingliang (@Aulddays)
Brian Mego (@brianmego)
Stevan Milic (@stevanmilic) <stevan.milic@yahoo.com>
Konstantin Glukhov (@Konstantin-Glukhov)
Seungchan An (@SeungChan92) <dev.issea1015@gmail.com>
Thomas Blauth (@ThomasBlauth) <thomas.blauth@protonmail.com>
James Cherti (@jamescherti)
@something are GitHub user names.

bundle/jedi-vim/CONTRIBUTING.md vendored Normal file
@@ -0,0 +1,12 @@
# We <3 pull requests!
1. Fork the Repo on github.
2. Add yourself to AUTHORS.txt
3. Add a test if possible.
4. Push to your fork and submit a pull request.
Please use PEP 8 as the Python code style. For Vim script, just try to style
your code similarly to the jedi-vim code that is already there.
# Bug reports
Please include the output of `:version` and `:JediDebugInfo`.

bundle/jedi-vim/LICENSE.txt vendored Normal file
@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) <2013> <David Halter and others, see AUTHORS.txt>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

bundle/jedi-vim/Makefile vendored Normal file
@@ -0,0 +1,35 @@
BUILD_VIRTUAL_ENV:=build/venv
test:
pytest
test_nvim:
VSPEC_VIM=nvim pytest
test_coverage: export PYTEST_ADDOPTS:=--cov pythonx --cov test --cov-report=term-missing:skip-covered
test_coverage: test_nvim
$(dir $(BUILD_VIRTUAL_ENV)):
mkdir -p $@
$(BUILD_VIRTUAL_ENV): | $(dir $(BUILD_VIRTUAL_ENV))
python -m venv $@
$(BUILD_VIRTUAL_ENV)/bin/vint: | $(BUILD_VIRTUAL_ENV)
$|/bin/python -m pip install vim-vint==0.3.21
$(BUILD_VIRTUAL_ENV)/bin/flake8: | $(BUILD_VIRTUAL_ENV)
$|/bin/python -m pip install -q flake8==3.7.8
vint: $(BUILD_VIRTUAL_ENV)/bin/vint
$(BUILD_VIRTUAL_ENV)/bin/vint after autoload ftplugin plugin
flake8: $(BUILD_VIRTUAL_ENV)/bin/flake8
$(BUILD_VIRTUAL_ENV)/bin/flake8 pythonx/jedi_*.py
check: vint flake8
clean:
rm -rf build
.PHONY: test check clean vint flake8

bundle/jedi-vim/README.rst vendored Normal file
@@ -0,0 +1,293 @@
.. image:: https://github.com/davidhalter/jedi-vim/blob/master/doc/logotype-a.svg
#################################################
jedi-vim - awesome Python autocompletion with VIM
#################################################
.. image:: https://travis-ci.org/davidhalter/jedi-vim.svg?branch=master
:target: https://travis-ci.org/davidhalter/jedi-vim
:alt: Travis-CI build status
jedi-vim is a VIM binding to the autocompletion library
`Jedi <http://github.com/davidhalter/jedi>`_.
Here are some pictures:
.. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_complete.png
Completion for almost anything (Ctrl+Space).
.. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_function.png
Display of function/class bodies, docstrings.
.. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_pydoc.png
Documentation (Pydoc) support (with highlighting, Shift+k).
There is also support for goto and renaming.
Get the latest from `github <http://github.com/davidhalter/jedi-vim>`_.
Documentation
=============
Documentation is available in your vim: ``:help jedi-vim``. You can also look
it up `on github <http://github.com/davidhalter/jedi-vim/blob/master/doc/jedi-vim.txt>`_.
You can read the Jedi library documentation `here <http://jedi.readthedocs.io/en/latest/>`_.
If you want to report issues, just use the github issue tracker. In case of
questions about the software, please use `stackoverflow
<https://stackoverflow.com/questions/tagged/jedi-vim>`_ and tag your question with ``jedi-vim``.
Contributing
============
We love Pull Requests! Read the instructions in ``CONTRIBUTING.md``.
Features
========
The Jedi library understands most of Python's core features. From decorators to
generators, there is broad support.
Apart from that, jedi-vim supports the following commands
- Completion ``<C-Space>``
- Goto assignment ``<leader>g`` (typical goto function)
- Goto definition ``<leader>d`` (follow identifier as far as possible,
includes imports and statements)
- Goto (typing) stub ``<leader>s``
- Show Documentation/Pydoc ``K`` (shows a popup with assignments)
- Renaming ``<leader>r``
- Usages ``<leader>n`` (shows all the usages of a name)
- Open module, e.g. ``:Pyimport os`` (opens the ``os`` module)
Installation
============
Requirements
------------
You need a VIM version that was compiled with Python 2.7 or later
(``+python`` or ``+python3``). You can check this from within VIM using
``:python3 import sys; print(sys.version)`` (use ``:python`` for Python 2).
Manual installation
-------------------
You might want to use `pathogen <https://github.com/tpope/vim-pathogen>`_ or
`Vundle <https://github.com/gmarik/vundle>`_ to install jedi-vim.
The first thing you need after that is an up-to-date version of Jedi. Install
it by running ``git submodule update --init --recursive`` in your jedi-vim
repository.
Example installation command using Pathogen:
.. code-block:: sh
git clone --recursive https://github.com/davidhalter/jedi-vim.git ~/.vim/bundle/jedi-vim
Example installation using Vundle:
Add the following line in your `~/.vimrc`
.. code-block:: vim
Plugin 'davidhalter/jedi-vim'
For installing Jedi, ``pip install jedi`` will also work, but you might run
into issues when working in virtual environments. Please use git submodules.
Installation with your distribution
-----------------------------------
On Arch Linux, you can also install jedi-vim from official repositories as
`vim-jedi <https://www.archlinux.org/packages/community/any/vim-jedi/>`__.
It is also available on
`Debian (≥8) <https://packages.debian.org/vim-python-jedi>`__ and
`Ubuntu (≥14.04) <http://packages.ubuntu.com/vim-python-jedi>`__ as
vim-python-jedi.
On Fedora Linux, it is available as
`vim-jedi <https://apps.fedoraproject.org/packages/vim-jedi>`__.
Please note that this version might be quite old compared to using jedi-vim
from Git.
Caveats
-------
Note that the `python-mode <https://github.com/klen/python-mode>`_ VIM plugin seems
to conflict with jedi-vim, therefore you should disable it before enabling
jedi-vim.
To enjoy the full features of jedi-vim, you should have VIM >= 7.3, compiled with
``+conceal`` (which is not the case on some platforms, including OS X). If your VIM
does not meet these requirements, the parameter recommendation list may not appear
when you type an open bracket after a function name. Please read
`the documentation <http://github.com/davidhalter/jedi-vim/blob/master/doc/jedi-vim.txt>`_
for details.
Settings
========
Jedi is by default automatically initialized. If you don't want that I suggest
you disable the auto-initialization in your ``.vimrc``:
.. code-block:: vim
let g:jedi#auto_initialization = 0
There are also some VIM options (like ``completeopt`` and key defaults) which
are automatically initialized, but you can skip this:
.. code-block:: vim
let g:jedi#auto_vim_configuration = 0
You can make jedi-vim use tabs when going to a definition etc:
.. code-block:: vim
let g:jedi#use_tabs_not_buffers = 1
If you are a person who likes to use VIM-splits, you might want to put this in your ``.vimrc``:
.. code-block:: vim
let g:jedi#use_splits_not_buffers = "left"
This option can be "left", "right", "top", "bottom", or "winwidth". It decides the direction in which the split opens.
Jedi automatically starts completion if you type a dot, e.g. ``str.``. If you
don't want this:
.. code-block:: vim
let g:jedi#popup_on_dot = 0
By default, Jedi selects the first entry of the completion menu, which gives a
better typing flow and usually saves one keypress. To disable this behaviour:
.. code-block:: vim
let g:jedi#popup_select_first = 0
Jedi displays function call signatures in insert mode in real-time,
highlighting the current argument. The call signatures can be displayed as a
pop-up in the buffer (set the option to 1, the default when the conceal
feature is available). This has the advantage of being easier to refer to,
but it is a hack with many drawbacks, since it changes the buffer's contents.
Alternatively, the signatures can be shown in Vim's command line, aligned
with the function call (set the option to 2), which can improve the integrity
of Vim's undo history.
.. code-block:: vim
let g:jedi#show_call_signatures = "1"
Here are a few more default mappings; read the docs (``:help jedi-vim``) for
more information. If you set them to ``""``, they are not assigned.
.. code-block:: vim
NOTE: subject to change!
let g:jedi#goto_command = "<leader>d"
let g:jedi#goto_assignments_command = "<leader>g"
let g:jedi#goto_stubs_command = "<leader>s"
let g:jedi#goto_definitions_command = ""
let g:jedi#documentation_command = "K"
let g:jedi#usages_command = "<leader>n"
let g:jedi#completions_command = "<C-Space>"
let g:jedi#rename_command = "<leader>r"
An example for setting up your project:
.. code-block:: vim
let g:jedi#environment_path = "/usr/bin/python3.9"
jedi-vim tries its best to guess your virtual env. If you want to work with a
specific virtual environment however, you can point jedi-vim towards it:
.. code-block:: vim
let g:jedi#environment_path = "venv"
Finally, if you don't want completion, but all the other features, use:
.. code-block:: vim
let g:jedi#completions_enabled = 0
FAQ
===
I want to use Jedi with a Python 2 Environment, but it's not listed under "Known environments"
----------------------------------------------------------------------------------------------
Starting with version 0.18.0 Jedi dropped support for Python 2.
I don't want the docstring window to popup during completion
------------------------------------------------------------
This depends on the ``completeopt`` option. Jedi initializes it in its
``ftplugin``. Add the following line to your ``.vimrc`` to disable it:
.. code-block:: vim
autocmd FileType python setlocal completeopt-=preview
I want <Tab> to do autocompletion
---------------------------------
Don't even think about changing the Jedi command to ``<Tab>``,
use `supertab <https://github.com/ervandew/supertab>`_!
The completion is too slow!
---------------------------
1. Completion of complex libraries (like Numpy) should only be slow the first
time you complete them. After that the results should be cached and very fast.
2. If it is still slow after the initial completion and you have installed the
python-mode Vim plugin, try disabling its rope mode:
.. code-block:: vim
let g:pymode_rope = 0
See issue `#163 <https://github.com/davidhalter/jedi-vim/issues/163>`__.
3. You can also use `deoplete-jedi <https://github.com/zchee/deoplete-jedi>`__
for completions, which uses Jedi, but does completions asynchronously
(requires Neovim).
It makes sense to use both jedi-vim and deoplete-jedi, but you should disable
jedi-vim's completions then:
.. code-block:: vim
let g:jedi#completions_enabled = 0
Testing
=======
jedi-vim is being tested with a combination of `vspec
<https://github.com/kana/vim-vspec>`_ and `py.test <http://pytest.org/>`_.
The tests are in the ``test`` subdirectory; you can run them by calling::
py.test
The tests are automatically run with `travis
<https://travis-ci.org/davidhalter/jedi-vim>`_.

bundle/jedi-vim/after/ftplugin/python/jedi.vim vendored Normal file
@@ -0,0 +1,3 @@
if jedi#init_python() && g:jedi#auto_initialization && g:jedi#completions_enabled
call jedi#setup_completion()
endif

bundle/jedi-vim/after/syntax/python.vim vendored Normal file
@@ -0,0 +1,34 @@
if !jedi#init_python()
finish
endif
if g:jedi#show_call_signatures > 0 && has('conceal')
" +conceal is the default for vim >= 7.3
let s:e = g:jedi#call_signature_escape
let s:full = s:e.'jedi=.\{-}'.s:e.'.\{-}'.s:e.'jedi'.s:e
let s:ignore = s:e.'jedi.\{-}'.s:e
exe 'syn match jediIgnore "'.s:ignore.'" contained conceal'
setlocal conceallevel=2
syn match jediFatSymbol "\*_\*" contained conceal
syn match jediFat "\*_\*.\{-}\*_\*" contained contains=jediFatSymbol
syn match jediSpace "\v[ ]+( )@=" contained
exe 'syn match jediFunction "'.s:full.'" keepend extend '
\ .' contains=jediIgnore,jediFat,jediSpace'
\ .' containedin=pythonComment,pythonString,pythonRawString'
unlet! s:e s:full s:ignore
hi def link jediIgnore Ignore
hi def link jediFatSymbol Ignore
hi def link jediSpace Normal
if exists('g:colors_name')
hi def link jediFunction CursorLine
hi def link jediFat TabLine
else
hi def jediFunction term=NONE cterm=NONE ctermfg=6 guifg=Black gui=NONE ctermbg=0 guibg=Grey
hi def jediFat term=bold,underline cterm=bold,underline gui=bold,underline ctermbg=0 guibg=#555555
endif
endif
hi def jediUsage cterm=reverse gui=standout

bundle/jedi-vim/autoload/health/jedi.vim vendored Normal file
@@ -0,0 +1,4 @@
function! health#jedi#check() abort
call health#report_start('jedi')
silent call jedi#debug_info()
endfunction

bundle/jedi-vim/autoload/jedi.vim vendored Normal file
@@ -0,0 +1,732 @@
scriptencoding utf-8
" ------------------------------------------------------------------------
" Settings initialization
" ------------------------------------------------------------------------
let s:deprecations = {
\ 'get_definition_command': 'goto_definitions_command',
\ 'pydoc': 'documentation_command',
\ 'related_names_command': 'usages_command',
\ 'autocompletion_command': 'completions_command',
\ 'show_function_definition': 'show_call_signatures',
\ }
let s:default_settings = {
\ 'use_tabs_not_buffers': 0,
\ 'use_splits_not_buffers': 1,
\ 'auto_initialization': 1,
\ 'auto_vim_configuration': 1,
\ 'goto_command': "'<leader>d'",
\ 'goto_assignments_command': "'<leader>g'",
\ 'goto_definitions_command': "''",
\ 'goto_stubs_command': "'<leader>s'",
\ 'completions_command': "'<C-Space>'",
\ 'call_signatures_command': "'<leader>n'",
\ 'usages_command': "'<leader>n'",
\ 'rename_command': "'<leader>r'",
\ 'completions_enabled': 1,
\ 'popup_on_dot': 'g:jedi#completions_enabled',
\ 'documentation_command': "'K'",
\ 'show_call_signatures': has('conceal') ? 1 : 2,
\ 'show_call_signatures_delay': 500,
\ 'call_signature_escape': "'?!?'",
\ 'auto_close_doc': 1,
\ 'max_doc_height': 30,
\ 'popup_select_first': 1,
\ 'quickfix_window_height': 10,
\ 'force_py_version': "'auto'",
\ 'environment_path': "'auto'",
\ 'added_sys_path': '[]',
\ 'project_path': "'auto'",
\ 'smart_auto_mappings': 0,
\ 'case_insensitive_completion': 1,
\ 'use_tag_stack': 1
\ }
for [s:key, s:val] in items(s:deprecations)
if exists('g:jedi#'.s:key)
echom "'g:jedi#".s:key."' is deprecated. Please use 'g:jedi#".s:val."' instead. Sorry for the inconvenience."
exe 'let g:jedi#'.s:val.' = g:jedi#'.s:key
endif
endfor
for [s:key, s:val] in items(s:default_settings)
if !exists('g:jedi#'.s:key)
exe 'let g:jedi#'.s:key.' = '.s:val
endif
endfor
let s:supports_buffer_usages = has('nvim') || exists('*prop_add')
" ------------------------------------------------------------------------
" Python initialization
" ------------------------------------------------------------------------
let s:script_path = expand('<sfile>:p:h:h')
function! s:init_python() abort
" Use g:jedi#force_py_version for loading Jedi, or fall back to using
" `has()` - preferring Python 3.
if !has('python3')
throw 'jedi-vim requires Vim with support for Python 3.'
endif
call jedi#setup_python_imports()
return 1
endfunction
function! jedi#reinit_python() abort
let s:_init_python = -1
call jedi#init_python()
endfunction
" This is meant to be called with `:unsilent` (for &shortmess+=F).
function! s:display_exception() abort
let error_lines = split(v:exception, '\n')
let msg = 'Error: jedi-vim failed to initialize Python: '
\ .error_lines[0].' (in '.v:throwpoint.')'
if len(error_lines) > 1
echohl ErrorMsg
echom 'jedi-vim error: '.error_lines[0]
for line in error_lines[1:]
echom line
endfor
echohl None
let help_cmd = ':JediDebugInfo'
if exists(':checkhealth') == 2
let help_cmd .= ' / :checkhealth'
endif
let msg .= printf('. See :messages and/or %s for more information.',
\ help_cmd)
endif
redraw " Redraw to only have the main message by default.
echoerr msg
endfunction
let s:_init_python = -1
function! jedi#init_python() abort
if s:_init_python == -1
let s:_init_python = 0
try
let s:_init_python = s:init_python()
let s:_init_python = 1
catch /^jedi/
" Only catch errors from jedi-vim itself here, so that for
" unexpected Python exceptions the traceback will be shown
" (e.g. with NameError in jedi#setup_python_imports's code).
if !exists('g:jedi#squelch_py_warning')
unsilent call s:display_exception()
endif
endtry
endif
return s:_init_python
endfunction
function! jedi#setup_python_imports() abort
let g:_jedi_init_error = 0
let init_lines = [
\ 'import vim',
\ 'def _jedi_handle_exc(exc_info):',
\ ' try:',
\ ' from jedi_vim_debug import format_exc_info',
\ ' vim.vars["_jedi_init_error"] = format_exc_info(exc_info)',
\ ' except Exception:',
\ ' import traceback',
\ ' vim.vars["_jedi_init_error"] = "\\n".join(traceback.format_exception(*exc_info))',
\ 'try:',
\ ' import jedi_vim',
\ ' if hasattr(jedi_vim, "jedi_import_error"):',
\ ' _jedi_handle_exc(jedi_vim.jedi_import_error)',
\ 'except Exception as exc:',
\ ' _jedi_handle_exc(sys.exc_info())',
\ ]
exe 'python3 exec('''.escape(join(init_lines, '\n'), "'").''')'
if g:_jedi_init_error isnot 0
throw printf('jedi#setup_python_imports: %s', g:_jedi_init_error)
endif
return 1
endfunction
function! jedi#debug_info() abort
if &verbose
if &filetype !=# 'python'
echohl WarningMsg | echo 'You should run this in a buffer with filetype "python".' | echohl None
endif
endif
let spath = shellescape(s:script_path)
echo '#### Jedi-vim debug information'
echo "\n"
echo '##### jedi-vim version'
echo "\n"
echo ' - jedi-vim git version: '
echon substitute(system('git -C '.spath.' describe --tags --always --dirty'), '\v\n$', '', '')
echo ' - jedi git submodule status: '
echon substitute(system('git -C '.spath.' submodule status pythonx/jedi'), '\v\n$', '', '')
echo ' - parso git submodule status: '
echon substitute(system('git -C '.spath.' submodule status pythonx/parso'), '\v\n$', '', '')
echo "\n"
echo '##### Global Python'
echo "\n"
echo 'Using Python version 3 to access Jedi.'
let s:pythonjedi_called = 0
try
python3 import vim; vim.command('let s:pythonjedi_called = 1')
catch
echo 'Error when trying to import vim: '.v:exception
endtry
if !s:pythonjedi_called
echohl WarningMsg
echom 'python3 failed to run, likely a Python config issue.'
if exists(':checkhealth') == 2
echom 'Try :checkhealth for more information.'
endif
echohl None
else
try
python3 from jedi_vim_debug import display_debug_info
python3 display_debug_info()
catch
echohl WarningMsg
echo 'Error when running display_debug_info: '.v:exception
echohl None
endtry
endif
echo "\n"
echo '##### Settings'
echo "\n"
echo '```'
let jedi_settings = items(filter(copy(g:), "v:key =~# '\\v^jedi#'"))
let has_nondefault_settings = 0
for [k, V] in jedi_settings
exe 'let default = '.get(s:default_settings,
\ substitute(k, '\v^jedi#', '', ''), "'-'")
" vint: -ProhibitUsingUndeclaredVariable
if default !=# V
echo printf('g:%s = %s (default: %s)', k, string(V), string(default))
unlet! V " Fix variable type mismatch with Vim 7.3.
let has_nondefault_settings = 1
endif
" vint: +ProhibitUsingUndeclaredVariable
endfor
if has_nondefault_settings
echo "\n"
endif
verb set omnifunc? completeopt?
echo '```'
if &verbose
echo "\n"
echo '#### :version'
echo '```'
version
echo '```'
echo "\n"
echo '#### :messages'
echo '```'
messages
echo '```'
echo "\n"
echo '<details><summary>:scriptnames</summary>'
echo "\n"
echo '```'
scriptnames
echo '```'
echo '</details>'
endif
endfunction
" Helper function instead of `python vim.eval()`, and `.command()` because
" these also return error definitions.
function! jedi#_vim_exceptions(str, is_eval) abort
let l:result = {}
try
if a:is_eval
let l:result.result = eval(a:str)
else
execute a:str
let l:result.result = ''
endif
catch
let l:result.exception = v:exception
let l:result.throwpoint = v:throwpoint
endtry
return l:result
endfunction
call jedi#init_python() " Might throw an error.
" ------------------------------------------------------------------------
" functions that call python code
" ------------------------------------------------------------------------
function! jedi#goto() abort
python3 jedi_vim.goto(mode="goto")
endfunction
function! jedi#goto_assignments() abort
python3 jedi_vim.goto(mode="assignment")
endfunction
function! jedi#goto_definitions() abort
python3 jedi_vim.goto(mode="definition")
endfunction
function! jedi#goto_stubs() abort
python3 jedi_vim.goto(mode="stubs")
endfunction
function! jedi#usages() abort
if exists('#jedi_usages#BufWinEnter')
call jedi#clear_usages()
endif
python3 jedi_vim.usages()
endfunction
if !s:supports_buffer_usages
" Hide usages in the current window.
" Only handles the current window due to matchdelete() restrictions.
function! jedi#_hide_usages_in_win() abort
let winnr = winnr()
let matchids = getwinvar(winnr, '_jedi_usages_vim_matchids', [])
for matchid in matchids[1:]
call matchdelete(matchid)
endfor
call setwinvar(winnr, '_jedi_usages_vim_matchids', [])
" Remove the autocommands that might have triggered this function.
augroup jedi_usages
exe 'autocmd! * <buffer='.winbufnr(winnr).'>'
augroup END
unlet! b:_jedi_usages_needs_clear
endfunction
" Show usages for current window (Vim without textprops only).
function! jedi#_show_usages_in_win() abort
python3 jedi_vim.highlight_usages_for_vim_win()
if !exists('#jedi_usages#TextChanged#<buffer>')
augroup jedi_usages
" Unset highlights on any changes to this buffer.
" NOTE: Neovim's API handles movement of highlights, but would only
" need to clear highlights that are changed inline.
autocmd TextChanged <buffer> call jedi#_clear_buffer_usages()
" Hide usages when the buffer is removed from the window, or when
" entering insert mode (but keep them for later).
autocmd BufWinLeave,InsertEnter <buffer> call jedi#_hide_usages_in_win()
augroup END
endif
endfunction
" Remove usages for the current buffer (and all its windows).
function! jedi#_clear_buffer_usages() abort
let bufnr = bufnr('%')
let nvim_src_ids = getbufvar(bufnr, '_jedi_usages_src_ids', [])
if !empty(nvim_src_ids)
for src_id in nvim_src_ids
" TODO: could only clear highlights below/after changed line?!
call nvim_buf_clear_highlight(bufnr, src_id, 0, -1)
endfor
else
call jedi#_hide_usages_in_win()
endif
endfunction
endif
" Remove/unset global usages.
function! jedi#clear_usages() abort
augroup jedi_usages
autocmd! BufWinEnter
autocmd! WinEnter
augroup END
if !s:supports_buffer_usages
" Vim without textprops: clear current window,
" autocommands will clean others on demand.
call jedi#_hide_usages_in_win()
" Setup autocommands to clear remaining highlights on WinEnter.
augroup jedi_usages
for b in range(1, bufnr('$'))
if getbufvar(b, '_jedi_usages_needs_clear')
exe 'autocmd WinEnter <buffer='.b.'> call jedi#_hide_usages_in_win()'
endif
endfor
augroup END
endif
python3 jedi_vim.clear_usages()
endfunction
function! jedi#rename(...) abort
python3 jedi_vim.rename()
endfunction
function! jedi#rename_visual(...) abort
python3 jedi_vim.rename_visual()
endfunction
function! jedi#completions(findstart, base) abort
python3 jedi_vim.completions()
endfunction
function! jedi#enable_speed_debugging() abort
python3 jedi_vim.jedi.set_debug_function(jedi_vim.print_to_stdout, speed=True, warnings=False, notices=False)
endfunction
function! jedi#enable_debugging() abort
python3 jedi_vim.jedi.set_debug_function(jedi_vim.print_to_stdout)
endfunction
function! jedi#disable_debugging() abort
python3 jedi_vim.jedi.set_debug_function(None)
endfunction
function! jedi#py_import(args) abort
python3 jedi_vim.py_import()
endfun
function! jedi#choose_environment(args) abort
python3 jedi_vim.choose_environment()
endfun
function! jedi#load_project(args) abort
python3 jedi_vim.load_project()
endfun
function! jedi#py_import_completions(argl, cmdl, pos) abort
python3 jedi_vim.py_import_completions()
endfun
function! jedi#clear_cache(bang) abort
if a:bang
python3 jedi_vim.jedi.cache.clear_time_caches(True)
else
python3 jedi_vim.jedi.cache.clear_time_caches(False)
endif
endfunction
" ------------------------------------------------------------------------
" show_documentation
" ------------------------------------------------------------------------
function! jedi#show_documentation() abort
python3 if jedi_vim.show_documentation() is None: vim.command('return')
let bn = bufnr('__doc__')
if bn > 0
let wi=index(tabpagebuflist(tabpagenr()), bn)
if wi >= 0
" If the __doc__ buffer is open in the current tab, jump to it
silent execute (wi+1).'wincmd w'
else
silent execute 'sbuffer '.bn
endif
else
split __doc__
endif
setlocal modifiable
setlocal noswapfile
setlocal buftype=nofile
silent normal! ggdG
silent $put=l:doc
silent normal! 1Gdd
setlocal nomodifiable
setlocal nomodified
setlocal filetype=rst
setlocal foldlevel=200 " do not fold in __doc__
if l:doc_lines > g:jedi#max_doc_height " max lines for plugin
let l:doc_lines = g:jedi#max_doc_height
endif
execute 'resize '.l:doc_lines
" quit comands
nnoremap <buffer> q ZQ
if len(g:jedi#documentation_command)
execute 'nnoremap <buffer> '.g:jedi#documentation_command.' ZQ'
endif
endfunction
" ------------------------------------------------------------------------
" helper functions
" ------------------------------------------------------------------------
function! jedi#add_goto_window(for_usages, len) abort
let height = min([a:len, g:jedi#quickfix_window_height])
" Use :copen to go to the window always - the user should select an entry.
execute 'belowright copen '.height
if &filetype !=# 'qf'
echoerr printf('jedi-vim: unexpected ft with current window (%s), please report!', &filetype)
endif
if g:jedi#use_tabs_not_buffers == 1
noremap <buffer> <CR> :call jedi#goto_window_on_enter()<CR>
endif
augroup jedi_goto_window
if a:for_usages
autocmd BufWinLeave <buffer> call jedi#clear_usages()
else
autocmd WinLeave <buffer> q " automatically leave, if an option is chosen
endif
augroup END
if a:for_usages && !has('nvim')
if s:supports_buffer_usages
" Setup autocommand for pending highlights with Vim's textprops.
" (cannot be added to unlisted buffers)
augroup jedi_usages
autocmd! BufWinEnter * call s:usages_for_pending_buffers()
augroup END
else
" Setup global autocommand to display any usages for a window.
" Gets removed when closing the quickfix window that displays them, or
" when clearing them (e.g. on TextChanged).
augroup jedi_usages
autocmd! BufWinEnter,WinEnter * call jedi#_show_usages_in_win()
augroup END
endif
endif
endfunction
" Highlight usages for a buffer if not done so yet (Neovim only).
function! s:usages_for_pending_buffers() abort
python3 jedi_vim._handle_pending_usages_for_buf()
endfunction
function! jedi#goto_window_on_enter() abort
let l:list = getqflist()
let l:data = l:list[line('.') - 1]
if l:data.bufnr
" close goto_window buffer
normal! ZQ
python3 jedi_vim.set_buffer(vim.eval('bufname(l:data.bufnr)'))
call cursor(l:data.lnum, l:data.col)
else
echohl WarningMsg | echo 'Builtin module cannot be opened.' | echohl None
endif
endfunction
function! s:syn_stack() abort
if !exists('*synstack')
return []
endif
return map(synstack(line('.'), col('.') - 1), "synIDattr(v:val, 'name')")
endfunc
function! jedi#do_popup_on_dot_in_highlight() abort
let highlight_groups = s:syn_stack()
for a in highlight_groups
if a ==# 'pythonDoctest'
return 1
endif
endfor
for a in highlight_groups
for b in ['pythonString', 'pythonComment', 'pythonNumber']
if a == b
return 0
endif
endfor
endfor
return 1
endfunc
let s:show_call_signatures_last = [0, 0, '']
function! jedi#show_call_signatures() abort
if s:_init_python == 0
return 1
endif
let [line, col] = [line('.'), col('.')]
let curline = getline(line)
let reload_signatures = 1
" Caching. On the same line only.
if line == s:show_call_signatures_last[0]
" Check if the number of special signs before or after the
" cursor has not changed since the last call, which means that the
" argument position was not changed and we can skip repainting.
let prevcol = s:show_call_signatures_last[1]
let prevline = s:show_call_signatures_last[2]
let no_special = '[^,()=]'
if substitute(curline[:col-2], no_special, '', 'g')
\ == substitute(prevline[:prevcol-2], no_special, '', 'g')
\ && substitute(curline[(col-2):], no_special, '', 'g')
\ == substitute(prevline[(prevcol-2):], no_special, '', 'g')
let reload_signatures = 0
endif
endif
let s:show_call_signatures_last = [line, col, curline]
if reload_signatures
python3 jedi_vim.show_call_signatures()
endif
endfunction
function! jedi#clear_call_signatures() abort
if s:_init_python == 0
return 1
endif
let s:show_call_signatures_last = [0, 0, '']
python3 jedi_vim.clear_call_signatures()
endfunction
function! jedi#configure_call_signatures() abort
augroup jedi_call_signatures
autocmd! * <buffer>
if g:jedi#show_call_signatures == 2 " Command line call signatures
autocmd InsertEnter <buffer> let g:jedi#first_col = s:save_first_col()
endif
autocmd InsertEnter <buffer> let s:show_call_signatures_last = [0, 0, '']
autocmd InsertLeave <buffer> call jedi#clear_call_signatures()
if g:jedi#show_call_signatures_delay > 0
autocmd InsertEnter <buffer> let b:_jedi_orig_updatetime = &updatetime
\ | let &updatetime = g:jedi#show_call_signatures_delay
autocmd InsertLeave <buffer> if exists('b:_jedi_orig_updatetime')
\ | let &updatetime = b:_jedi_orig_updatetime
\ | unlet b:_jedi_orig_updatetime
\ | endif
autocmd CursorHoldI <buffer> call jedi#show_call_signatures()
else
autocmd CursorMovedI <buffer> call jedi#show_call_signatures()
endif
augroup END
endfunction
" Determine where the current window is on the screen for displaying call
" signatures in the correct column.
function! s:save_first_col() abort
if bufname('%') ==# '[Command Line]' || winnr('$') == 1
return 0
endif
let startwin = winnr()
let winwidth = winwidth(0)
if winwidth == &columns
return 0
elseif winnr('$') == 2
return startwin == 1 ? 0 : (winwidth(1) + 1)
elseif winnr('$') == 3
if startwin == 1
return 0
endif
let ww1 = winwidth(1)
let ww2 = winwidth(2)
let ww3 = winwidth(3)
if ww1 + ww2 + ww3 + 2 == &columns
if startwin == 2
return ww1 + 1
else
return ww1 + ww2 + 2
endif
elseif startwin == 2
if ww2 + ww3 + 1 == &columns
return 0
else
return ww1 + 1
endif
else " startwin == 3
if ww2 + ww3 + 1 == &columns
return ww2 + 1
else
return ww1 + 1
endif
endif
endif
return 0
endfunction
function! jedi#complete_string(autocomplete) abort
if a:autocomplete
if !(g:jedi#popup_on_dot && jedi#do_popup_on_dot_in_highlight())
return ''
endif
let s:saved_completeopt = &completeopt
set completeopt-=longest
set completeopt+=menuone
set completeopt-=menu
if &completeopt !~# 'noinsert\|noselect'
" Patch 775 introduced noinsert and noselect, previously these
" options didn't exist. Setting them in earlier versions results in
" errors (E474).
if has('patch-7.4-775')
if g:jedi#popup_select_first
set completeopt+=noinsert
else
set completeopt+=noselect
endif
else
" To pass the tests we use this, it seems to get the closest to
" the other options. I'm really not sure if this properly
" works, but VIM 7.4-775 is already pretty old, so it might not
" be a problem anymore in a few years.
set completeopt+=longest
endif
endif
elseif pumvisible()
return "\<C-n>"
endif
return "\<C-x>\<C-o>\<C-r>=jedi#complete_opened(".a:autocomplete.")\<CR>"
endfunction
function! jedi#complete_opened(autocomplete) abort
if a:autocomplete
let &completeopt = s:saved_completeopt
unlet s:saved_completeopt
elseif pumvisible() && g:jedi#popup_select_first && stridx(&completeopt, 'longest') > -1
return "\<Down>"
endif
return ''
endfunction
function! jedi#smart_auto_mappings() abort
" Auto put import statement after from module.name<space> and complete
if search('\m^\s*from\s\+[A-Za-z0-9._]\{1,50}\%#\s*$', 'bcn', line('.'))
" Enter character and start completion.
return "\<space>import \<C-r>=jedi#complete_string(1)\<CR>"
endif
return "\<space>"
endfunction
function! jedi#setup_completion() abort
" We need our own omnifunc, so this overrides the omnifunc set by
" $VIMRUNTIME/ftplugin/python.vim.
setlocal omnifunc=jedi#completions
" map ctrl+space for autocompletion
if g:jedi#completions_command ==# '<C-Space>'
" In terminals, <C-Space> sometimes equals <Nul>.
imap <buffer> <Nul> <C-Space>
smap <buffer> <Nul> <C-Space>
endif
if len(g:jedi#completions_command)
execute 'inoremap <expr> <buffer> '.g:jedi#completions_command.' jedi#complete_string(0)'
" A separate mapping for select mode: deletes and completes.
execute 'snoremap <expr> <buffer> '.g:jedi#completions_command." '\<C-g>c'.jedi#complete_string(0)"
endif
endfunction
"python3 jedi_vim.jedi.set_debug_function(jedi_vim.print_to_stdout, speed=True, warnings=False, notices=False)
"python3 jedi_vim.jedi.set_debug_function(jedi_vim.print_to_stdout)
" vim: set et ts=4:

bundle/jedi-vim/codecov.yml vendored Normal file
@@ -0,0 +1,7 @@
coverage:
status:
project: true
patch: true
changes: true
comment: off

bundle/jedi-vim/doc/jedi-vim.txt vendored Normal file
@@ -0,0 +1,578 @@
*jedi-vim.txt* - For Vim version 7.3 - Last change: 2014/07/29
__ _______ _______ __ ____ ____ __ .___ ___.~
| | | ____|| \ | | \ \ / / | | | \/ |~
| | | |__ | .--. || | _____\ \/ / | | | \ / |~
.--. | | | __| | | | || | |______\ / | | | |\/| |~
| `--' | | |____ | '--' || | \ / | | | | | |~
\______/ |_______||_______/ |__| \__/ |__| |__| |__|~
jedi-vim - awesome Python autocompletion with Vim
==============================================================================
Contents *jedi-vim-contents*
1. Introduction |jedi-vim-introduction|
2. Installation |jedi-vim-installation|
2.0. Requirements |jedi-vim-installation-requirements|
2.1. Manually |jedi-vim-installation-manually|
2.2. Using Pathogen |jedi-vim-installation-pathogen|
2.3. Using Vundle |jedi-vim-installation-vundle|
2.4. Installing from Repositories |jedi-vim-installation-repos|
3. Supported Python features |jedi-vim-support|
4. Usage |jedi-vim-usage|
5. Mappings |jedi-vim-keybindings|
5.1. Start completion |g:jedi#completions_command|
5.2. Go to definition |g:jedi#goto_command|
5.3. Go to assignment |g:jedi#goto_assignments_command|
5.4 Go to stub |g:jedi#goto_stubs_command|
5.5. Show documentation |g:jedi#documentation_command|
5.6. Rename variables |g:jedi#rename_command|
5.7. Show name usages |g:jedi#usages_command|
5.8. Open module by name |:Pyimport|
6. Configuration |jedi-vim-configuration|
6.1. auto_initialization |g:jedi#auto_initialization|
6.2. auto_vim_configuration |g:jedi#auto_vim_configuration|
6.3. popup_on_dot |g:jedi#popup_on_dot|
6.4. popup_select_first |g:jedi#popup_select_first|
6.5. auto_close_doc |g:jedi#auto_close_doc|
6.6. show_call_signatures |g:jedi#show_call_signatures|
6.7. show_call_signatures_delay |g:jedi#show_call_signatures_delay|
6.8. use_tabs_not_buffers |g:jedi#use_tabs_not_buffers|
6.9. squelch_py_warning |g:jedi#squelch_py_warning|
6.10. completions_enabled |g:jedi#completions_enabled|
6.11. use_splits_not_buffers |g:jedi#use_splits_not_buffers|
6.12. force_py_version |g:jedi#force_py_version|
6.13. smart_auto_mappings |g:jedi#smart_auto_mappings|
6.14. use_tag_stack |g:jedi#use_tag_stack|
6.15. environment_path |g:jedi#environment_path|
|b:jedi_environment_path|
6.16. added_sys_path |g:jedi#added_sys_path|
|b:jedi_added_sys_path|
6.17. case_insensitive_completion |g:jedi#case_insensitive_completion|
|b:jedi_case_insensitive_completion|
7. Testing |jedi-vim-testing|
8. Contributing |jedi-vim-contributing|
9. License |jedi-vim-license|
==============================================================================
1. Introduction *jedi-vim-introduction*
Jedi-vim is a Vim binding to the awesome Python autocompletion library
`jedi`. Among jedi's (and, therefore, jedi-vim's) features are:
- Completion for a wide array of Python features (see |jedi-vim-support|)
- Robust in dealing with syntax errors and wrong indentation
- Parses complex module/function/class structures
- Infers function arguments from Sphinx/Epydoc strings
- Doesn't execute Python code
- Supports Virtualenv
- Supports Python 2.7 and 3.4+
By leveraging this library, jedi-vim adds the following capabilities to Vim:
- Displaying function/class bodies
- "Go to definition" command
- Displaying docstrings
- Renaming and refactoring
- Looking up related names
==============================================================================
2. Installation *jedi-vim-installation*
------------------------------------------------------------------------------
2.0. Requirements *jedi-vim-installation-requirements*
First of all, jedi-vim requires Vim to be compiled with the `+python` option.
It is best if you have VIM >= 7.3, compiled with the `+conceal` option. With
older versions, you will probably not see the parameter recommendation list
for functions after typing the open bracket. Some platforms (including OS X
releases) do not ship a VIM with `+conceal`. You can check if your VIM has the
feature with >
:ver
and look for "`+conceal`" (as opposed to "`-conceal`") or >
:echo has('conceal')
which will report 0 (not included) or 1 (included). If your VIM lacks this
feature and you would like function parameter completion, you will need to
build your own VIM, or use a package for your operating system that has this
feature (such as MacVim on OS X, which also contains a console binary).
------------------------------------------------------------------------------
2.1. Installing manually *jedi-vim-installation-manually*
1. If you want to install jedi as a submodule instead, issue this command: >
git clone --recursive http://github.com/davidhalter/jedi-vim
2. Put the plugin files into their respective folders in your vim runtime
directory (usually ~/.vim). Be sure to pay attention to the directory
structure!
3. Update the Vim help tags with >
:helptags <path/to/vimruntime>/doc
------------------------------------------------------------------------------
2.2. Installing using Pathogen *jedi-vim-installation-pathogen*
Pathogen simplifies installation considerably.
1.a Clone the git repository into your bundles directory: >
git clone http://github.com/davidhalter/jedi-vim path/to/bundles/jedi-vim
1b. Again, if you want to install jedi as a submodule, use this command
instead: >
git clone --recursive http://github.com/davidhalter/jedi-vim
------------------------------------------------------------------------------
2.3. Installing using Vundle *jedi-vim-installation-vundle*
1. Vundle automatically downloads subrepositories as git submodules, so you
will automatically get the jedi library with the jedi-vim plugin. Add the
following to the Bundles section in your .vimrc file: >
Plugin 'davidhalter/jedi-vim'
2. Issue the following command in Vim: >
:PluginInstall
Help tags are generated automatically, so you should be good to go.
------------------------------------------------------------------------------
2.4. Installing from Repositories *jedi-vim-installation-repos*
Some Linux distributions have jedi-vim packages in their official
repositories. On Arch Linux, install vim-jedi. On Debian (8+) or Ubuntu
(14.04+) install vim-python-jedi.
==============================================================================
3. Supported Python features *jedi-vim-support*
The Jedi library does all the hard work behind the scenes. It understands most
Python features, among them:
- Builtins
- Multiple `return`s or `yield`s
- Tuple assignments/array indexing/dictionary indexing
- `with`-statement/exception handling
- `*args` and `**kwargs`
- Decorators, lambdas, closures
- Generators, iterators
- Some descriptors: `property`/`staticmethod`/`classmethod`
- Some magic methods: `__call__`, `__iter__`, `__next__`, `__get__`,
`__getitem__`, `__init__`
- `list.append()`, `set.add()`, `list.extend()`, etc.
- (Nested) list comprehensions and ternary expressions
- Relative `import`s
- `getattr()`/`__getattr__`/`__getattribute__`
- Function annotations (py3k feature, are being ignored at the moment, but are
parsed)
- Class decorators (py3k feature, are being ignored at the moment, but are
parsed)
- Simple/usual `sys.path` modifications
- `isinstance` checks for `if`/`while`/`assert`
- Stubs
- And more...
Note: This list is not necessarily up to date. For a complete list of
features, please refer to the Jedi documentation at
http://jedi.readthedocs.io.
==============================================================================
4. Usage *jedi-vim-usage*
With the default settings, autocompletion can be triggered by typing
<Ctrl-Space>. The first entry will automatically be selected, so you can press
<Return> to insert it into your code or keep typing and narrow down your
completion options. The usual <C-X><C-O> and <C-P>/<C-N> keybindings work as
well. Autocompletion is also triggered by typing a period in insert mode.
Since periods rarely occur in Python code outside of method/import lookups,
this is handy to have (but can be disabled).
When it encounters a new module, jedi might take a few seconds to parse that
module's contents. Afterwards, the contents are cached and completion will be
almost instantaneous.
==============================================================================
5. Key Bindings *jedi-vim-keybindings*
All keybindings can be mapped by setting the appropriate global option. For
example, to set the keybinding for starting omnicompletion to <C-N> instead of
<Ctrl-Space>, add the following setting to your .vimrc file: >
let g:jedi#completions_command = "<C-N>"
Note: If you have |g:jedi#auto_initialization| set to 0, you have to create
a mapping yourself by calling a function: >
" Using <C-N> for omnicompletion
inoremap <silent> <buffer> <C-N> <c-x><c-o>
" Use <localleader>r (by default <\-r>) for renaming
nnoremap <silent> <buffer> <localleader>r :call jedi#rename()<cr>
" etc.
Note: You can set commands to '', which means that they are empty and not
assigned. It's an easy way to "disable" functionality of jedi-vim.
------------------------------------------------------------------------------
5.1. `g:jedi#completions_command` *g:jedi#completions_command*
Function: n/a; see above
Default: <Ctrl-Space> Start completion
Performs autocompletion (or omnicompletion, to be precise).
Note: If you want to use <Tab> for completion, please install Supertab:
https://github.com/ervandew/supertab.
------------------------------------------------------------------------------
5.2. `g:jedi#goto_command` *g:jedi#goto_command*
Function: `jedi#goto()`
Default: <leader>d Go to definition (or assignment)
This function first tries |jedi#goto_definitions|, and falls back to
|jedi#goto_assignments| for builtin modules. It produces an error if nothing
could be found.
NOTE: this implementation is subject to change.
Ref: https://github.com/davidhalter/jedi/issues/570
This command tries to find the original definition of the function/class under
the cursor. Just like the `jedi#goto_assignments()` function, it does not work
if the definition isn't in a Python source file.
The difference between `jedi#goto_assignments()` and `jedi#goto_definitions()`
is that the latter performs recursive lookups. Take, for example, the
following module structure: >
# file1.py:
from file2 import foo
# file2.py:
from file3 import bar as foo
# file3.py
def bar():
pass
The `jedi#goto_assignments()` function will take you to the >
from file2 import foo
statement in file1.py, while the `jedi#goto_definitions()` function will take
you all the way to the >
def bar():
line in file3.py.
------------------------------------------------------------------------------
5.3. `g:jedi#goto_assignments_command` *g:jedi#goto_assignments_command*
Function: `jedi#goto_assignments()`
Default: <leader>g Go to assignment
This function finds the first definition of the function/class under the
cursor. It produces an error if the definition is not in a Python file.
------------------------------------------------------------------------------
5.4. `g:jedi#goto_stubs_command` *g:jedi#goto_stubs_command*
Function: `jedi#goto_stubs()`
Default: <leader>s Go to stub
Finds the stub of the function/class under the cursor.
------------------------------------------------------------------------------
5.5. `g:jedi#documentation_command` *g:jedi#documentation_command*
Function: `jedi#show_documentation()`
Default: <K> Show pydoc documentation
This shows the pydoc documentation for the item currently under the cursor.
The documentation is opened in a horizontally split buffer. The height of this
buffer is controlled by `g:jedi#max_doc_height` (set by default to 30).
------------------------------------------------------------------------------
5.6. `g:jedi#rename_command` *g:jedi#rename_command*
Function: `jedi#rename()`
Default: <leader>r Rename variables
Jedi-vim deletes the word currently under the cursor and puts Vim in insert
mode, where the user is expected to enter the new variable name. Upon leaving
insert mode, jedi-vim then renames all occurrences of the old variable name
with the new one. The number of performed renames is displayed in the command
line.
------------------------------------------------------------------------------
5.7. `g:jedi#usages_command` *g:jedi#usages_command*
Function: `jedi#usages()`
Default: <leader>n Show usages of a name.
The quickfix window is populated with a list of all names which point to the
definition of the name under the cursor.
------------------------------------------------------------------------------
5.8. Open module by name *:Pyimport*
Function: `jedi#py_import(args)`
Default: :Pyimport e.g. `:Pyimport os` shows os.py in VIM.
Simulate an import and open that module in VIM.
==============================================================================
6. Configuration *jedi-vim-configuration*
Note: You currently have to set these options in your .vimrc. Setting them in
an ftplugin (e.g. ~/.vim/ftplugin/python/jedi-vim-settings.vim) will not work
because jedi-vim is not set up as a filetype plugin, but as a "regular"
plugin.
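For reference, a minimal .vimrc snippet combining a few of the settings
documented below might look like this (all values are just illustrative
examples): >
    let g:jedi#popup_on_dot = 0
    let g:jedi#show_call_signatures = 2
    let g:jedi#use_splits_not_buffers = "right"
    let g:jedi#environment_path = "venv"
<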
------------------------------------------------------------------------------
6.1. `g:jedi#auto_initialization` *g:jedi#auto_initialization*
Upon initialization, jedi-vim performs the following steps:
1. Set the current buffer's 'omnifunc' to its own completion function
`jedi#completions`
2. Create mappings to commands specified in |jedi-vim-keybindings|
3. Call `jedi#configure_call_signatures()` if
`g:jedi#show_call_signatures` is set
You can disable the default initialization routine by setting this option to
0. Beware that you have to perform the above steps yourself, though.
Options: 0 or 1
Default: 1 (Perform automatic initialization)
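If you disable auto-initialization, a minimal sketch of performing the steps
yourself for a Python buffer could look like this (the mappings shown are
only examples; adapt them to your liking): >
    setlocal omnifunc=jedi#completions
    nnoremap <silent> <buffer> <leader>d :call jedi#goto()<cr>
    nnoremap <silent> <buffer> K :call jedi#show_documentation()<cr>
    call jedi#configure_call_signatures()
<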
------------------------------------------------------------------------------
6.2. `g:jedi#auto_vim_configuration` *g:jedi#auto_vim_configuration*
Jedi-vim sets 'completeopt' to `menuone,longest,preview` by default, if
'completeopt' is not changed from Vim's default.
It also remaps <Ctrl-C> to <Esc> in insert mode.
If you want to keep your own configuration, disable this setting.
Options: 0 or 1
Default: 1 (Set 'completeopt' and mapping as described above)
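For example, to keep your own completion configuration (the 'completeopt'
value shown below is just the default jedi-vim would otherwise set): >
    let g:jedi#auto_vim_configuration = 0
    set completeopt=menuone,longest,preview
<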
------------------------------------------------------------------------------
6.3. `g:jedi#popup_on_dot` *g:jedi#popup_on_dot*
Jedi-vim automatically starts completion upon typing a period in insert mode.
However, when working with large modules, this can slow down your typing flow
since you have to wait for jedi to parse the module and show the completion
menu. By disabling this setting, completion is only started when you manually
press the completion key.
You also need to have `g:jedi#completions_enabled` enabled for this.
Options: 0 or 1
Default: 1 (Start completion on typing a period)
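For example, to start completion only when the completion key is pressed: >
    let g:jedi#popup_on_dot = 0
<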
------------------------------------------------------------------------------
6.4. `g:jedi#popup_select_first` *g:jedi#popup_select_first*
Upon starting completion, jedi-vim can automatically select the first entry
that pops up (without actually inserting it).
This leads to a better typing flow: As you type more characters, the entries
in the completion menu are narrowed down. If they are narrowed down enough,
you can just press <Return> to insert the first match.
Options: 0 or 1
Default: 1 (Automatically select first completion entry)
------------------------------------------------------------------------------
6.5. `g:jedi#auto_close_doc` *g:jedi#auto_close_doc*
When doing completion, jedi-vim shows the docstring of the currently selected
item in a preview window. By default, this window is closed after a
completion item has been inserted.
Set this to 0 to leave the preview window open even after leaving insert mode.
This could be useful if you want to browse longer docstrings.
Options: 0 or 1
Default: 1 (Automatically close preview window upon leaving insert mode)
------------------------------------------------------------------------------
6.6. `g:jedi#show_call_signatures` *g:jedi#show_call_signatures*
Jedi-vim can display a small window detailing the arguments of the currently
completed function and highlighting the currently selected argument. This can
be disabled by setting this option to 0. Setting this option to 2 shows call
signatures in the command line instead of a popup window.
Options: 0, 1, or 2
Default: 1 (Show call signatures window)
Note: 'showmode' must be disabled for command line call signatures to be
visible.
Note: This setting is ignored if |g:jedi#auto_initialization| is set to 0. In
that case, if you want to see call signatures, you have to set it up
manually by calling a function in your configuration file: >
call jedi#configure_call_signatures()
------------------------------------------------------------------------------
6.7. `g:jedi#show_call_signatures_delay` *g:jedi#show_call_signatures_delay*
The delay to be used with |g:jedi#show_call_signatures|. If it is greater
than 0 it will use Vim's |CursorHoldI| event instead of |CursorMovedI|.
It will temporarily set Vim's |'updatetime'| option during insert mode.
Options: delay in milliseconds
Default: 500
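For example, to show call signatures only after a one-second pause (the value
is just an illustration): >
    let g:jedi#show_call_signatures_delay = 1000
<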
------------------------------------------------------------------------------
6.8. `g:jedi#use_tabs_not_buffers` *g:jedi#use_tabs_not_buffers*
You can make jedi-vim open a new tab if you use the "go to", "show
definition", or "related names" commands. When you leave this at the default
(0), they open in the current window instead.
Options: 0 or 1
Default: 0 (Command output reuses current window)
------------------------------------------------------------------------------
6.9. `g:jedi#squelch_py_warning` *g:jedi#squelch_py_warning*
When Vim has not been compiled with +python, jedi-vim shows a warning to that
effect and aborts loading itself. Set this to 1 to suppress that warning.
Options: 0 or 1
Default: 0 (Warning is shown)
------------------------------------------------------------------------------
6.10. `g:jedi#completions_enabled` *g:jedi#completions_enabled*
If you don't want Jedi completion, but all the other features, you can disable
it in favor of another completion engine (that probably also uses Jedi, like
YCM).
Options: 0 or 1
Default: 1
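For example, if another completion engine provides your completions, you
could disable Jedi's own: >
    let g:jedi#completions_enabled = 0
<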
------------------------------------------------------------------------------
6.11. `g:jedi#use_splits_not_buffers` *g:jedi#use_splits_not_buffers*
If you want the "go to" commands to open their result in a new split, you can
set this option to the direction in which the split should open.
Options: top, left, right, bottom or winwidth
Default: "" (not enabled by default)
Note: with the 'winwidth' option the window is split vertically or horizontally
depending on the width of the window relative to 'textwidth'. This essentially
means that if the window is wide enough it is split vertically, but if it is
narrow a horizontal split is used.
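For example, to always open "go to" results in a split to the left: >
    let g:jedi#use_splits_not_buffers = "left"
<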
------------------------------------------------------------------------------
6.12. `g:jedi#force_py_version` *g:jedi#force_py_version*
If you have installed multiple Python versions, you can force the Python
version that is going to be used.
You don't have to compile VIM with multiple Python versions.
The variable can be set in the .vimrc like this to force Python 2: >
    let g:jedi#force_py_version = 2
<
By default, jedi loads the latest Python version that can be found on your
system.
This variable can be changed at runtime.
Options: 2, 2.7, 3, 3.5, 3.6, ...
Default: "auto"
------------------------------------------------------------------------------
6.13. `g:jedi#smart_auto_mappings` *g:jedi#smart_auto_mappings*
When you start typing `from module.name<space>`, jedi-vim can automatically
add the "import" statement and trigger the autocompletion popup.
You can enable this using: >
let g:jedi#smart_auto_mappings = 1
<
Options: 0 or 1
Default: 0 (disabled by default)
------------------------------------------------------------------------------
6.14. `g:jedi#use_tag_stack` *g:jedi#use_tag_stack*
Write results of |jedi#goto| to a temporary file and use the |:tjump| command
to enable full |tagstack| functionality. Use of the tag stack allows
returning to the usage of a function with CTRL-T after exploring the
definition with arbitrary changes to the |jumplist|.
Options: 0 or 1
Default: 1 (enabled by default)
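If you prefer not to touch the tag stack, you could disable this: >
    let g:jedi#use_tag_stack = 0
<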
------------------------------------------------------------------------------
6.15. `g:jedi#environment_path` *g:jedi#environment_path*
*b:jedi_environment_path*
To use a specific virtualenv or a specific Python version, it is possible to
set an interpreter here.
Both setting a directory and setting a project work.
Examples: "/usr/bin/python3.9", "venv", "../venv", "../venv/bin/python"
The buffer-local variable `b:jedi_environment_path` can be used to override the
global variable `g:jedi#environment_path`.
Default: "auto"
------------------------------------------------------------------------------
6.16. `g:jedi#added_sys_path` *g:jedi#added_sys_path*
*b:jedi_added_sys_path*
Additional paths to add to Jedi's sys_path.
The buffer-local variable `b:jedi_added_sys_path` can be used to add further
paths on top of the global ones.
Examples: ["../site-packages"]
Default: []
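For example, to make an extra package directory visible to Jedi: >
    let g:jedi#added_sys_path = ['../site-packages']
<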
------------------------------------------------------------------------------
6.17. `g:jedi#case_insensitive_completion` *g:jedi#case_insensitive_completion*
*b:jedi_case_insensitive_completion*
0 to disable case insensitive completion.
1 to enable case insensitive completion (default).
The buffer-local variable `b:jedi_case_insensitive_completion` can be used to
override the global variable `g:jedi#case_insensitive_completion`.
Default: 1
==============================================================================
7. Testing *jedi-vim-testing*
jedi-vim is tested with a combination of vspec
(https://github.com/kana/vim-vspec) and py.test (http://pytest.org/).
The tests are in the test subdirectory; you can run them by calling: >
    py.test
<
The tests are run automatically by Travis
(https://travis-ci.org/davidhalter/jedi-vim).
==============================================================================
8. Contributing *jedi-vim-contributing*
We love Pull Requests! Read the instructions in `CONTRIBUTING.md`.
==============================================================================
9. License *jedi-vim-license*
Jedi-vim is licensed with the MIT license.
vim: textwidth=78 et filetype=help:norightleft:

bundle/jedi-vim/doc/logotype-a.svg vendored Normal file

@ -0,0 +1,140 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 15.1.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg version="1.1" id="Layer_2" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
width="897.845px" height="247.51px" viewBox="0 0 897.845 247.51" enable-background="new 0 0 897.845 247.51"
xml:space="preserve">
<g>
<g>
<linearGradient id="SVGID_1_" gradientUnits="userSpaceOnUse" x1="287.3965" y1="65.2686" x2="287.3965" y2="106.4546">
<stop offset="0" style="stop-color:#E27817"/>
<stop offset="0.3906" style="stop-color:#E47519"/>
<stop offset="0.7116" style="stop-color:#E96B1F"/>
<stop offset="1" style="stop-color:#F15A29"/>
</linearGradient>
<polygon fill="url(#SVGID_1_)" points="285.068,66.556 272.054,95.664 302.739,95.664 302.739,66.556 "/>
<linearGradient id="SVGID_2_" gradientUnits="userSpaceOnUse" x1="216.8877" y1="65.2686" x2="216.8877" y2="106.4546">
<stop offset="0" style="stop-color:#E27817"/>
<stop offset="0.3906" style="stop-color:#E47519"/>
<stop offset="0.7116" style="stop-color:#E96B1F"/>
<stop offset="1" style="stop-color:#F15A29"/>
</linearGradient>
<polygon fill="url(#SVGID_2_)" points="180.483,95.664 193.893,95.664 240.172,95.664 253.292,66.556 180.483,66.556 "/>
<g>
<polygon fill="#019733" points="256.464,59.293 253.603,65.696 253.593,65.696 253.197,66.592 240.172,95.7 215.738,150.321
199.788,185.978 231.676,185.978 231.676,185.969 272.071,95.7 275.634,87.737 285.089,66.592 288.355,59.293 "/>
</g>
<g>
<polygon fill="#019733" points="215.738,150.321 194.605,95.7 180.483,95.7 180.483,135.118 199.788,185.969 199.788,185.978
"/>
</g>
<path fill="#019733" d="M151.355,59.294v100.005h-28.989h-11.292v-7.972c3.644-4.232,6.749-9.936,6.749-16.218
c0-2.846-0.456-5.578-1.294-8.104h0.011l-4.274-12.151c-0.238-1.16-0.633-2.257-1.15-3.281v-0.032l-5.878-11.59
c-1.201-2.808-3.975-4.897-7.34-5.362c0.281-4.969,2.837-8.87,5.932-8.87l-3.137-1.004l3.137-3.033
c-5.01,0-9.098,5.775-9.388,13.021c-3.116,0.609-5.672,2.598-6.79,5.258l-5.723,11.271c-0.052,0.104-0.104,0.197-0.155,0.3v0.01
c-0.528,1.057-0.932,2.174-1.169,3.344l-4.232,12.079c-0.012,0.02-0.012,0.03-0.021,0.042c-0.828,2.525-1.294,5.258-1.294,8.104
c0,6.282,3.25,11.985,6.904,16.218v35.616h29.112h11.294h58.105V59.294H151.355z M86.554,117.834
c-0.673,2.019-2.442,3.252-3.954,2.744c-1.51-0.508-2.194-2.547-1.521-4.565c0.673-2.017,2.443-3.25,3.953-2.742
C86.544,113.777,87.228,115.817,86.554,117.834z M110.163,120.578c-1.51,0.508-3.27-0.726-3.954-2.744
c-0.672-2.017,0.011-4.057,1.521-4.563c1.512-0.508,3.282,0.726,3.955,2.742C112.357,118.031,111.674,120.07,110.163,120.578z"/>
<linearGradient id="SVGID_3_" gradientUnits="userSpaceOnUse" x1="134.6348" y1="65.2686" x2="134.6348" y2="106.4546">
<stop offset="0" style="stop-color:#E27817"/>
<stop offset="0.3906" style="stop-color:#E47519"/>
<stop offset="0.7116" style="stop-color:#E96B1F"/>
<stop offset="1" style="stop-color:#F15A29"/>
</linearGradient>
<polygon fill="url(#SVGID_3_)" points="151.292,66.556 117.914,66.556 117.914,95.664 145.981,95.664 151.292,95.664
151.355,95.664 151.355,66.592 151.292,66.592 "/>
<g opacity="0.5">
<linearGradient id="SVGID_4_" gradientUnits="userSpaceOnUse" x1="244.0713" y1="198.6924" x2="244.0713" y2="111.1674">
<stop offset="0" style="stop-color:#019733"/>
<stop offset="0.1363" style="stop-color:#0B732D;stop-opacity:0.8637"/>
<stop offset="0.2826" style="stop-color:#145529;stop-opacity:0.7174"/>
<stop offset="0.4366" style="stop-color:#1A3D25;stop-opacity:0.5634"/>
<stop offset="0.5997" style="stop-color:#1F2C22;stop-opacity:0.4003"/>
<stop offset="0.778" style="stop-color:#222221;stop-opacity:0.222"/>
<stop offset="1" style="stop-color:#231F20;stop-opacity:0"/>
</linearGradient>
<polygon fill="url(#SVGID_4_)" points="256.464,59.293 253.603,65.696 253.593,65.696 253.197,66.592 240.172,95.7
215.738,150.321 199.788,185.978 231.676,185.978 231.676,185.969 272.071,95.7 275.634,87.737 285.089,66.592 288.355,59.293
"/>
</g>
<linearGradient id="SVGID_5_" gradientUnits="userSpaceOnUse" x1="165.9131" y1="41.1123" x2="165.9131" y2="128.6277">
<stop offset="0" style="stop-color:#019733"/>
<stop offset="0.1363" style="stop-color:#0B732D;stop-opacity:0.8637"/>
<stop offset="0.2826" style="stop-color:#145529;stop-opacity:0.7174"/>
<stop offset="0.4366" style="stop-color:#1A3D25;stop-opacity:0.5634"/>
<stop offset="0.5997" style="stop-color:#1F2C22;stop-opacity:0.4003"/>
<stop offset="0.778" style="stop-color:#222221;stop-opacity:0.222"/>
<stop offset="1" style="stop-color:#231F20;stop-opacity:0"/>
</linearGradient>
<rect x="151.355" y="59.294" opacity="0.5" fill="url(#SVGID_5_)" width="29.116" height="127.649"/>
<g opacity="0.58">
<linearGradient id="SVGID_6_" gradientUnits="userSpaceOnUse" x1="198.1104" y1="83.4141" x2="198.1104" y2="154.2879">
<stop offset="0" style="stop-color:#019733"/>
<stop offset="0.0409" style="stop-color:#038E32;stop-opacity:0.9591"/>
<stop offset="0.2465" style="stop-color:#0F662B;stop-opacity:0.7535"/>
<stop offset="0.4491" style="stop-color:#184726;stop-opacity:0.5509"/>
<stop offset="0.6453" style="stop-color:#1E3123;stop-opacity:0.3547"/>
<stop offset="0.8322" style="stop-color:#222421;stop-opacity:0.1678"/>
<stop offset="1" style="stop-color:#231F20;stop-opacity:0"/>
</linearGradient>
<polygon fill="url(#SVGID_6_)" points="215.738,150.321 194.605,95.7 180.483,95.7 180.483,135.118 199.788,185.969
199.788,185.978 "/>
</g>
<linearGradient id="SVGID_7_" gradientUnits="userSpaceOnUse" x1="127.7627" y1="192.1367" x2="127.7627" y2="137.5901">
<stop offset="0" style="stop-color:#019733"/>
<stop offset="0.0409" style="stop-color:#038E32;stop-opacity:0.9591"/>
<stop offset="0.2465" style="stop-color:#0F662B;stop-opacity:0.7535"/>
<stop offset="0.4491" style="stop-color:#184726;stop-opacity:0.5509"/>
<stop offset="0.6453" style="stop-color:#1E3123;stop-opacity:0.3547"/>
<stop offset="0.8322" style="stop-color:#222421;stop-opacity:0.1678"/>
<stop offset="1" style="stop-color:#231F20;stop-opacity:0"/>
</linearGradient>
<path opacity="0.6" fill="url(#SVGID_7_)" d="M151.355,59.294v100.005h-28.989h-11.292v-7.972
c3.644-4.232,6.749-9.936,6.749-16.218c0-2.846-0.456-5.578-1.294-8.104h0.011l-4.274-12.151c-0.238-1.16-0.633-2.257-1.15-3.281
v-0.032l-5.878-11.59c-1.201-2.808-3.975-4.897-7.34-5.362c0.281-4.969,2.837-8.87,5.932-8.87l-3.137-1.004l3.137-3.033
c-5.01,0-9.098,5.775-9.388,13.021c-3.116,0.609-5.672,2.598-6.79,5.258l-5.723,11.271c-0.052,0.104-0.104,0.197-0.155,0.3v0.01
c-0.528,1.057-0.932,2.174-1.169,3.344l-4.232,12.079c-0.012,0.02-0.012,0.03-0.021,0.042c-0.828,2.525-1.294,5.258-1.294,8.104
c0,6.282,3.25,11.985,6.904,16.218v35.616h29.112h11.294h58.105V59.294H151.355z M86.554,117.834
c-0.673,2.019-2.442,3.252-3.954,2.744c-1.51-0.508-2.194-2.547-1.521-4.565c0.673-2.017,2.443-3.25,3.953-2.742
C86.544,113.777,87.228,115.817,86.554,117.834z M110.163,120.578c-1.51,0.508-3.27-0.726-3.954-2.744
c-0.672-2.017,0.011-4.057,1.521-4.563c1.512-0.508,3.282,0.726,3.955,2.742C112.357,118.031,111.674,120.07,110.163,120.578z"/>
<g opacity="0.5">
<linearGradient id="SVGID_8_" gradientUnits="userSpaceOnUse" x1="244.0713" y1="53.3584" x2="244.0713" y2="78.0277">
<stop offset="0" style="stop-color:#019733"/>
<stop offset="0.1363" style="stop-color:#0B732D;stop-opacity:0.8637"/>
<stop offset="0.2826" style="stop-color:#145529;stop-opacity:0.7174"/>
<stop offset="0.4366" style="stop-color:#1A3D25;stop-opacity:0.5634"/>
<stop offset="0.5997" style="stop-color:#1F2C22;stop-opacity:0.4003"/>
<stop offset="0.778" style="stop-color:#222221;stop-opacity:0.222"/>
<stop offset="1" style="stop-color:#231F20;stop-opacity:0"/>
</linearGradient>
<polygon fill="url(#SVGID_8_)" points="256.464,59.293 253.603,65.696 253.593,65.696 253.197,66.592 240.172,95.7
215.738,150.321 199.788,185.978 231.676,185.978 231.676,185.969 272.071,95.7 275.634,87.737 285.089,66.592 288.355,59.293
"/>
</g>
<polygon fill="#019733" points="180.483,59.198 183.329,66.556 180.471,66.556 "/>
<polygon fill="#019733" points="180.483,59.198 183.329,66.556 180.471,66.556 "/>
<g>
<polygon fill="#019733" points="290.713,66.592 285.054,66.592 288.32,59.293 "/>
<polygon fill="#019733" points="308.417,120.554 283.562,95.7 300.261,95.7 "/>
<polygon opacity="0.6" fill="#019733" points="308.417,120.554 283.562,95.7 300.261,95.7 "/>
<polygon fill="#019733" points="290.713,66.592 285.054,66.592 288.32,59.293 "/>
</g>
</g>
<g>
<path fill="#666666" d="M358.849,130.077v23.827h28.81V77.887h16.547v91.732h-62.987v-39.542H358.849z"/>
<path fill="#666666" d="M434.61,93.602v22.677h45.356v14.949H434.61v22.677h45.356v15.715h-62.987V77.887h62.987v15.715H434.61z"
/>
<path fill="#666666" d="M492.744,169.619V77.887h56.342l6.643,8.05v75.634l-6.579,8.049H492.744z M539.184,93.602h-28.811v60.303
h28.811V93.602z"/>
<path fill="#666666" d="M586.136,169.619h-17.632V77.887h17.632V169.619z"/>
<path fill="#666666" d="M598.913,116.278h33.281v14.949h-33.281V116.278z"/>
<path fill="#666666" d="M691.412,77.887h16.544l-19.995,91.732h-21.462l-21.527-91.732h17.63l13.798,70.588L691.412,77.887z"/>
<path fill="#666666" d="M738.364,169.619h-17.631V77.887h17.631V169.619z"/>
<path fill="#666666" d="M768.77,169.619h-17.632V77.887h17.632l17.886,50.273l18.59-50.273h16.545v91.732h-16.545v-38.392
l-12.84,38.392h-10.35l-13.286-38.392V169.619z"/>
</g>
</g>
</svg>


@ -0,0 +1,53 @@
if !jedi#init_python()
finish
endif
" ------------------------------------------------------------------------
" Initialization of jedi-vim
" ------------------------------------------------------------------------
if g:jedi#auto_initialization
" goto / get_definition / usages
if len(g:jedi#goto_command)
execute 'nnoremap <buffer> '.g:jedi#goto_command.' :call jedi#goto()<CR>'
endif
if len(g:jedi#goto_assignments_command)
execute 'nnoremap <buffer> '.g:jedi#goto_assignments_command.' :call jedi#goto_assignments()<CR>'
endif
if len(g:jedi#goto_definitions_command)
execute 'nnoremap <buffer> '.g:jedi#goto_definitions_command.' :call jedi#goto_definitions()<CR>'
endif
if len(g:jedi#goto_stubs_command)
execute 'nnoremap <buffer> '.g:jedi#goto_stubs_command.' :call jedi#goto_stubs()<CR>'
endif
if len(g:jedi#usages_command)
execute 'nnoremap <buffer> '.g:jedi#usages_command.' :call jedi#usages()<CR>'
endif
" rename
if len(g:jedi#rename_command)
execute 'nnoremap <buffer> '.g:jedi#rename_command.' :call jedi#rename()<CR>'
execute 'vnoremap <buffer> '.g:jedi#rename_command.' :call jedi#rename_visual()<CR>'
endif
" documentation/pydoc
if len(g:jedi#documentation_command)
execute 'nnoremap <silent> <buffer>'.g:jedi#documentation_command.' :call jedi#show_documentation()<CR>'
endif
if g:jedi#show_call_signatures > 0
call jedi#configure_call_signatures()
endif
if g:jedi#completions_enabled == 1
inoremap <silent> <buffer> . .<C-R>=jedi#complete_string(1)<CR>
endif
if g:jedi#smart_auto_mappings == 1
inoremap <silent> <buffer> <space> <C-R>=jedi#smart_auto_mappings()<CR>
end
if g:jedi#auto_close_doc
" close preview if its still open after insert
augroup jedi_preview
autocmd! InsertLeave <buffer> if pumvisible() == 0|pclose|endif
augroup END
endif
endif

bundle/jedi-vim/plugin/jedi.vim vendored Normal file

@ -0,0 +1,77 @@
"jedi-vim - Omni Completion for python in vim
" Maintainer: David Halter <davidhalter88@gmail.com>
"
" This part of the software is just the vim interface. The really big deal is
" the Jedi Python library.
if get(g:, 'jedi#auto_vim_configuration', 1)
" jedi-vim doesn't work in compatible mode (vim script syntax problems)
if &compatible
" vint: -ProhibitSetNoCompatible
set nocompatible
" vint: +ProhibitSetNoCompatible
endif
" jedi-vim really needs, otherwise jedi-vim cannot start.
filetype plugin on
augroup jedi_pyi
au!
autocmd BufNewFile,BufRead *.pyi set filetype=python
augroup END
" Change completeopt, but only if it was not set already.
" This gets done on VimEnter, since otherwise Vim fails to restore the
" screen. Neovim is not affected, this is likely caused by using
" :redir/execute() before the (alternate) terminal is configured.
function! s:setup_completeopt()
if exists('*execute')
let completeopt = execute('silent verb set completeopt?')
else
redir => completeopt
silent verb set completeopt?
redir END
endif
if len(split(completeopt, '\n')) == 1
set completeopt=menuone,longest,preview
endif
endfunction
if has('nvim')
call s:setup_completeopt()
else
augroup jedi_startup
au!
autocmd VimEnter * call s:setup_completeopt()
augroup END
endif
if len(mapcheck('<C-c>', 'i')) == 0
inoremap <C-c> <ESC>
endif
endif
" Pyimport command
command! -nargs=1 -complete=custom,jedi#py_import_completions Pyimport :call jedi#py_import(<q-args>)
command! -nargs=? -complete=file JediChooseEnvironment :call jedi#choose_environment(<q-args>)
command! -nargs=? -complete=file JediLoadProject :call jedi#load_project(<q-args>)
function! s:jedi_debug_info()
" Ensure the autoload file has been loaded (and ignore any errors, which
" will be displayed with the debug info).
let unset = {}
let saved_squelch_py_warning = get(g:, 'jedi#squelch_py_warning', unset)
let g:jedi#squelch_py_warning = 1
call jedi#init_python()
if saved_squelch_py_warning is unset
unlet g:jedi#squelch_py_warning
else
let g:jedi#squelch_py_warning = saved_squelch_py_warning
endif
call jedi#debug_info()
endfunction
command! -nargs=0 -bar JediDebugInfo call s:jedi_debug_info()
command! -nargs=0 -bang JediClearCache call jedi#clear_cache(<bang>0)
" vim: set et ts=4:


@ -0,0 +1,14 @@
root = true
[*]
charset = utf-8
end_of_line = lf
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true
[*.py]
indent_size = 4
[*.md]
indent_size = 2


@ -0,0 +1,10 @@
# all end-of-lines are normalized to LF when written to the repository
# https://git-scm.com/docs/gitattributes#_text
* text=auto
# force all text files on the working dir to have LF line endings
# https://git-scm.com/docs/gitattributes#_eol
* text eol=lf
# PNGs are not text and should not be normalized
*.png -text


@ -0,0 +1 @@
github: [davidhalter]


@ -0,0 +1,73 @@
name: ci
on: [push, pull_request]
jobs:
tests:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-20.04, windows-2019]
python-version: ["3.10", "3.9", "3.8", "3.7", "3.6"]
environment: ['3.8', '3.10', '3.9', '3.7', '3.6', 'interpreter']
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
submodules: recursive
- uses: actions/setup-python@v2
if: ${{ matrix.environment != 'interpreter' }}
with:
python-version: ${{ matrix.environment }}
- uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: 'pip install .[testing]'
- name: Run tests
run: python -m pytest
env:
JEDI_TEST_ENVIRONMENT: ${{ matrix.environment }}
code-quality:
runs-on: ubuntu-20.04
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
submodules: recursive
- name: Install dependencies
run: 'pip install .[qa]'
- name: Run tests
run: |
python -m flake8 jedi setup.py
python -m mypy jedi sith.py
coverage:
runs-on: ubuntu-20.04
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
submodules: recursive
- name: Install dependencies
run: 'pip install .[testing] coverage'
- name: Run tests
run: |
python -m coverage run --source jedi -m pytest
python -m coverage report
- name: Upload coverage data
run: |
pip install --quiet codecov coveralls
python -m coverage xml
python -m coverage report -m
bash <(curl -s https://codecov.io/bash) -X gcov -X coveragepy -X search -X fix -X xcode -f coverage.xml

bundle/jedi-vim/pythonx/jedi/.gitignore vendored Normal file

@ -0,0 +1,16 @@
*~
*.sw?
*.pyc
.ropeproject
.coveralls.yml
.coverage
.idea
/build/
/docs/_build/
/dist/
jedi.egg-info/
record.json
/.cache/
/.pytest_cache
/.mypy_cache
/venv/


@ -0,0 +1,6 @@
[submodule "jedi/third_party/typeshed"]
path = jedi/third_party/typeshed
url = https://github.com/davidhalter/typeshed.git
[submodule "jedi/third_party/django-stubs"]
path = jedi/third_party/django-stubs
url = https://github.com/davidhalter/django-stubs


@ -0,0 +1,2 @@
python:
pip_install: true


@ -0,0 +1,68 @@
Main Authors
------------
- David Halter (@davidhalter) <davidhalter88@gmail.com>
- Takafumi Arakaki (@tkf) <aka.tkf@gmail.com>
Code Contributors
-----------------
- Danilo Bargen (@dbrgn) <mail@dbrgn.ch>
- Laurens Van Houtven (@lvh) <_@lvh.cc>
- Aldo Stracquadanio (@Astrac) <aldo.strac@gmail.com>
- Jean-Louis Fuchs (@ganwell) <ganwell@fangorn.ch>
- tek (@tek)
- Yasha Borevich (@jjay) <j.borevich@gmail.com>
- Aaron Griffin <aaronmgriffin@gmail.com>
- andviro (@andviro)
- Mike Gilbert (@floppym) <floppym@gentoo.org>
- Aaron Meurer (@asmeurer) <asmeurer@gmail.com>
- Lubos Trilety <ltrilety@redhat.com>
- Akinori Hattori (@hattya) <hattya@gmail.com>
- srusskih (@srusskih)
- Steven Silvester (@blink1073)
- Colin Duquesnoy (@ColinDuquesnoy) <colin.duquesnoy@gmail.com>
- Jorgen Schaefer (@jorgenschaefer) <contact@jorgenschaefer.de>
- Fredrik Bergroth (@fbergroth)
- Mathias Fußenegger (@mfussenegger)
- Syohei Yoshida (@syohex) <syohex@gmail.com>
- ppalucky (@ppalucky)
- immerrr (@immerrr) immerrr@gmail.com
- Albertas Agejevas (@alga)
- Savor d'Isavano (@KenetJervet) <newelevenken@163.com>
- Phillip Berndt (@phillipberndt) <phillip.berndt@gmail.com>
- Ian Lee (@IanLee1521) <IanLee1521@gmail.com>
- Farkhad Khatamov (@hatamov) <comsgn@gmail.com>
- Kevin Kelley (@kelleyk) <kelleyk@kelleyk.net>
- Sid Shanker (@squidarth) <sid.p.shanker@gmail.com>
- Reinoud Elhorst (@reinhrst)
- Guido van Rossum (@gvanrossum) <guido@python.org>
- Dmytro Sadovnychyi (@sadovnychyi) <jedi@dmit.ro>
- Cristi Burcă (@scribu)
- bstaint (@bstaint)
- Mathias Rav (@Mortal) <rav@cs.au.dk>
- Daniel Fiterman (@dfit99) <fitermandaniel2@gmail.com>
- Simon Ruggier (@sruggier)
- Élie Gouzien (@ElieGouzien)
- Robin Roth (@robinro)
- Malte Plath (@langsamer)
- Anton Zub (@zabulazza)
- Maksim Novikov (@m-novikov) <mnovikov.work@gmail.com>
- Tobias Rzepka (@TobiasRzepka)
- micbou (@micbou)
- Dima Gerasimov (@karlicoss) <karlicoss@gmail.com>
- Max Woerner Chase (@mwchase) <max.chase@gmail.com>
- Johannes Maria Frank (@jmfrank63) <jmfrank63@gmail.com>
- Shane Steinert-Threlkeld (@shanest) <ssshanest@gmail.com>
- Tim Gates (@timgates42) <tim.gates@iress.com>
- Lior Goldberg (@goldberglior)
- Ryan Clary (@mrclary)
- Max Mäusezahl (@mmaeusezahl) <maxmaeusezahl@googlemail.com>
- Vladislav Serebrennikov (@endilll)
- Andrii Kolomoiets (@muffinmad)
- Leo Ryu (@Leo-Ryu)
- Joseph Birkner (@josephbirkner)
And a few more "anonymous" contributors.
Note: (@user) means a github user name.


@ -0,0 +1,304 @@
.. :changelog:
Changelog
---------
Unreleased
++++++++++
0.18.1 (2021-11-17)
+++++++++++++++++++
- Implicit namespaces are now a separate type in ``Name().type``
- Python 3.10 support
- Mostly bugfixes
0.18.0 (2020-12-25)
+++++++++++++++++++
- Dropped Python 2 and Python 3.5
- Using ``pathlib.Path()`` as an output instead of ``str`` in most places:
- ``Project.path``
- ``Script.path``
- ``Definition.module_path``
- ``Refactoring.get_renames``
- ``Refactoring.get_changed_files``
- Functions with ``@property`` now return ``property`` instead of ``function``
in ``Name().type``
- Started using annotations
- Better support for the walrus operator
- Project attributes are now read accessible
- Removed all deprecations
This is likely going to be the last minor release before 1.0.
0.17.2 (2020-07-17)
+++++++++++++++++++
- Added an option to pass environment variables to ``Environment``
- ``Project(...).path`` exists now
- Support for Python 3.9
- A few bugfixes
This will be the last release that supports Python 2 and Python 3.5.
``0.18.0`` will be Python 3.6+.
0.17.1 (2020-06-20)
+++++++++++++++++++
- Django ``Model`` meta class support
- Django Manager support (completion on Managers/QuerySets)
- Added Django Stubs to Jedi, thanks to all contributors of the
`Django Stubs <https://github.com/typeddjango/django-stubs>`_ project
- Added ``SyntaxError.get_message``
- Python 3.9 support
- Bugfixes (mostly towards Generics)
0.17.0 (2020-04-14)
+++++++++++++++++++
- Added ``Project`` support. This allows a user to specify which folders Jedi
should work with.
- Added support for Refactoring. The following refactorings have been
implemented: ``Script.rename``, ``Script.inline``,
``Script.extract_variable`` and ``Script.extract_function``.
- Added ``Script.get_syntax_errors`` to display syntax errors in the current
script.
- Added code search capabilities both for individual files and projects. The
new functions are ``Project.search``, ``Project.complete_search``,
``Script.search`` and ``Script.complete_search``.
- Added ``Script.help`` to make it easier to display a help window to people.
Now returns pydoc information as well for Python keywords/operators. This
means that on the class keyword it will now return the docstring of Python's
builtin function ``help('class')``.
- The API documentation is now way more readable and complete. Check it out
under https://jedi.readthedocs.io. A lot of it has been rewritten.
- Removed Python 3.4 support
- Many bugfixes
This is likely going to be the last minor version that supports Python 2 and
Python3.5. Bugfixes will be provided in 0.17.1+. The next minor/major version
will probably be Jedi 1.0.0.
0.16.0 (2020-01-26)
+++++++++++++++++++
- **Added** ``Script.get_context`` to get information where you currently are.
- Completions/type inference of **Pytest fixtures**.
- Tensorflow, Numpy and Pandas completions should now be about **4-10x faster**
after the first time they are used.
- Dict key completions are working now. e.g. ``d = {1000: 3}; d[10`` will
expand to ``1000``.
- Completion for "proxies" works now. These are classes that have a
``__getattr__(self, name)`` method that does a ``return getattr(x, name)``.
after loading them initially.
- Goto on a function/attribute in a class now goes to the definition in its
super class.
- Big **Script API Changes**:
- The line and column parameters of ``jedi.Script`` are now deprecated
- ``completions`` deprecated, use ``complete`` instead
- ``goto_assignments`` deprecated, use ``goto`` instead
- ``goto_definitions`` deprecated, use ``infer`` instead
- ``call_signatures`` deprecated, use ``get_signatures`` instead
- ``usages`` deprecated, use ``get_references`` instead
- ``jedi.names`` deprecated, use ``jedi.Script(...).get_names()``
- ``BaseName.goto_assignments`` renamed to ``BaseName.goto``
- Add follow_imports to ``Name.goto``. Now its signature matches
``Script.goto``.
- **Python 2 support deprecated**. For this release it is best effort. Python 2
has reached the end of its life and now it's just about a smooth transition.
Bugs for Python 2 will not be fixed anymore and a third of the tests are
already skipped.
- Removed ``settings.no_completion_duplicates``. It wasn't tested and nobody
was probably using it anyway.
- Removed ``settings.use_filesystem_cache`` and
``settings.additional_dynamic_modules``, they have no usage anymore. Pretty
much nobody was probably using them.
0.15.2 (2019-12-20)
+++++++++++++++++++
- Signatures are now detected a lot better
- Add fuzzy completions with ``Script(...).completions(fuzzy=True)``
- Files bigger than one MB (about 20kLOC) get cropped to avoid getting
stuck completely.
- Many small Bugfixes
- A big refactoring around contexts/values
0.15.1 (2019-08-13)
+++++++++++++++++++
- Small bugfix and removal of a print statement
0.15.0 (2019-08-11)
+++++++++++++++++++
- Added file path completions, there's a **new** ``Completion.type`` now:
``path``. Example: ``'/ho`` -> ``'/home/``
- ``*args``/``**kwargs`` resolving. If possible Jedi replaces the parameters
with the actual alternatives.
- Better support for enums/dataclasses
- When using Interpreter, properties are now executed, since a lot of people
have complained about this. Discussion in #1299, #1347.
New APIs:
- ``Name.get_signatures() -> List[Signature]``. Signatures are similar to
``CallSignature``. ``Name.params`` is therefore deprecated.
- ``Signature.to_string()`` to format signatures.
- ``Signature.params -> List[ParamName]``, ParamName has the
following additional attributes ``infer_default()``, ``infer_annotation()``,
``to_string()``, and ``kind``.
- ``Name.execute() -> List[Name]``, makes it possible to infer
return values of functions.
0.14.1 (2019-07-13)
+++++++++++++++++++
- CallSignature.index should now be working a lot better
- A couple of smaller bugfixes
0.14.0 (2019-06-20)
+++++++++++++++++++
- Added ``goto_*(prefer_stubs=True)`` as well as ``goto_*(only_stubs=True)``
- Stubs are used now for type inference
- Typeshed is used for better type inference
- Reworked Name.full_name, should have more correct return values
0.13.3 (2019-02-24)
+++++++++++++++++++
- Fixed an issue with embedded Python, see https://github.com/davidhalter/jedi-vim/issues/870
0.13.2 (2018-12-15)
+++++++++++++++++++
- Fixed a bug that led to Jedi spawning a lot of subprocesses.
0.13.1 (2018-10-02)
+++++++++++++++++++
- Bugfixes, because tensorflow completions were still slow.
0.13.0 (2018-10-02)
+++++++++++++++++++
- A small release. Some bug fixes.
- Remove Python 3.3 support. Python 3.3 support has been dropped by the Python
foundation.
- Default environments are now using the same Python version as the Python
process. In 0.12.x, we used to load the latest Python version on the system.
- Added ``include_builtins`` as a parameter to usages.
- ``goto_assignments`` has a new ``follow_builtin_imports`` parameter that
changes the previous behavior slightly.
0.12.1 (2018-06-30)
+++++++++++++++++++
- This release forces you to upgrade parso. If you don't, nothing will work
anymore. Otherwise changes should be limited to bug fixes. Unfortunately Jedi
still uses a few internals of parso that make it hard to keep compatibility
over multiple releases. Parso >=0.3.0 is going to be needed.
0.12.0 (2018-04-15)
+++++++++++++++++++
- Virtualenv/Environment support
- F-String Completion/Goto Support
- Cannot crash with segfaults anymore
- Cleaned up import logic
- Understand async/await and autocomplete it (including async generators)
- Better namespace completions
- Passing tests for Windows (including CI for Windows)
- Remove Python 2.6 support
0.11.1 (2017-12-14)
+++++++++++++++++++
- Parso update - the caching layer was broken
- Better usages - a lot of internal code was ripped out and improved.
0.11.0 (2017-09-20)
+++++++++++++++++++
- Split Jedi's parser into a separate project called ``parso``.
- Avoiding side effects in REPL completion.
- Numpy docstring support should be much better.
- Moved the `settings.*recursion*` away, they are no longer usable.
0.10.2 (2017-04-05)
+++++++++++++++++++
- Python Packaging sucks. Some files were not included in 0.10.1.
0.10.1 (2017-04-05)
+++++++++++++++++++
- Fixed a few very annoying bugs.
- Prepared the parser to be factored out of Jedi.
0.10.0 (2017-02-03)
+++++++++++++++++++
- Actual semantic completions for the complete Python syntax.
- Basic type inference for ``yield from`` PEP 380.
- PEP 484 support (most of the important features of it). Thanks Claude! (@reinhrst)
- Added ``get_line_code`` to ``Name`` and ``Completion`` objects.
- Completely rewritten the type inference engine.
- A new and better parser for (fast) parsing diffs of Python code.
0.9.0 (2015-04-10)
++++++++++++++++++
- The import logic has been rewritten to look more like Python's. There is now
an ``InferState.modules`` import cache, which resembles ``sys.modules``.
- Integrated the parser of 2to3. This will make refactoring possible. It will
also be possible to check for error messages (like compiling an AST would give)
in the future.
- With the new parser, the type inference also completely changed. It's now
simpler and more readable.
- Completely rewritten REPL completion.
- Added ``jedi.names``, a command to do static analysis. Thanks to that
sourcegraph guys for sponsoring this!
- Alpha version of the linter.
0.8.1 (2014-07-23)
+++++++++++++++++++
- Bugfix release, the last release forgot to include files that improve
autocompletion for builtin libraries. Fixed.
0.8.0 (2014-05-05)
+++++++++++++++++++
- Memory Consumption for compiled modules (e.g. builtins, sys) has been reduced
drastically. Loading times are down as well (it takes basically as long as an
import).
- REPL completion is starting to become usable.
- Various small API changes. Generally this release focuses on stability and
refactoring of internal APIs.
- Introducing operator precedence, which makes calculating correct Array
indices and ``__getattr__`` strings possible.
0.7.0 (2013-08-09)
++++++++++++++++++
- Switched from LGPL to MIT license.
- Added an Interpreter class to the API to make autocompletion in REPL
possible.
- Added autocompletion support for namespace packages.
- Add sith.py, a new random testing method.
0.6.0 (2013-05-14)
++++++++++++++++++
- Much faster parser with builtin part caching.
- A test suite, thanks @tkf.
0.5 versions (2012)
+++++++++++++++++++
- Initial development.


@ -0,0 +1,8 @@
Pull Requests are great.
1. Fork the Repo on github.
2. If you are adding functionality or fixing a bug, please add a test!
3. Add your name to AUTHORS.txt
4. Push to your fork and submit a pull request.
**Try to use the PEP8 style guide** (and it's ok to have a line length of 100 characters).


@ -0,0 +1,24 @@
All contributions towards Jedi are MIT licensed.
-------------------------------------------------------------------------------
The MIT License (MIT)
Copyright (c) <2013> <David Halter and others, see AUTHORS.txt>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.


@ -0,0 +1,15 @@
include README.rst
include CHANGELOG.rst
include LICENSE.txt
include AUTHORS.txt
include .coveragerc
include sith.py
include conftest.py
include pytest.ini
recursive-include jedi/third_party *.pyi
include jedi/third_party/typeshed/LICENSE
include jedi/third_party/django-stubs/LICENSE.txt
include jedi/third_party/typeshed/README
recursive-include test *
recursive-include docs *
recursive-exclude * *.pyc

bundle/jedi-vim/pythonx/jedi/README.rst vendored Normal file

@ -0,0 +1,216 @@
####################################################################################
Jedi - an awesome autocompletion, static analysis and refactoring library for Python
####################################################################################
.. image:: http://isitmaintained.com/badge/open/davidhalter/jedi.svg
:target: https://github.com/davidhalter/jedi/issues
:alt: The percentage of open issues and pull requests
.. image:: http://isitmaintained.com/badge/resolution/davidhalter/jedi.svg
:target: https://github.com/davidhalter/jedi/issues
:alt: The resolution time is the median time an issue or pull request stays open.
.. image:: https://github.com/davidhalter/jedi/workflows/ci/badge.svg?branch=master
:target: https://github.com/davidhalter/jedi/actions
:alt: Tests
.. image:: https://pepy.tech/badge/jedi
:target: https://pepy.tech/project/jedi
:alt: PyPI Downloads
Jedi is a static analysis tool for Python that is typically used in
IDEs/editors plugins. Jedi has a focus on autocompletion and goto
functionality. Other features include refactoring, code search and finding
references.
Jedi has a simple API to work with. There is a reference implementation as a
`VIM-Plugin <https://github.com/davidhalter/jedi-vim>`_. Autocompletion in your
REPL is also possible, IPython uses it natively and for the CPython REPL you
can install it. Jedi is well tested and bugs should be rare.
Jedi can currently be used with the following editors/projects:
- Vim (jedi-vim_, YouCompleteMe_, deoplete-jedi_, completor.vim_)
- `Visual Studio Code`_ (via `Python Extension <https://marketplace.visualstudio.com/items?itemName=ms-python.python>`_)
- Emacs (Jedi.el_, company-mode_, elpy_, anaconda-mode_, ycmd_)
- Sublime Text (SublimeJEDI_ [ST2 + ST3], anaconda_ [only ST3])
- TextMate_ (Not sure if it's actually working)
- Kate_ version 4.13+ supports it natively, you have to enable it, though. [`see
<https://projects.kde.org/projects/kde/applications/kate/repository/show?rev=KDE%2F4.13>`_]
- Atom_ (autocomplete-python-jedi_)
- `GNOME Builder`_ (with support for GObject Introspection)
- Gedit (gedi_)
- wdb_ - Web Debugger
- `Eric IDE`_ (Available as a plugin)
- `IPython 6.0.0+ <https://ipython.readthedocs.io/en/stable/whatsnew/version6.html>`_
- `xonsh shell <https://xon.sh/contents.html>`_ has `jedi extension <https://xon.sh/xontribs.html#jedi>`_
and many more!
There are a few language servers that use Jedi:
- `jedi-language-server <https://github.com/pappasam/jedi-language-server>`_
- `python-language-server <https://github.com/palantir/python-language-server>`_
- `anakin-language-server <https://github.com/muffinmad/anakin-language-server>`_
Here are some pictures taken from jedi-vim_:
.. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_complete.png
Completion for almost anything:
.. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_function.png
Documentation:
.. image:: https://github.com/davidhalter/jedi/raw/master/docs/_screenshots/screenshot_pydoc.png
Get the latest version from `github <https://github.com/davidhalter/jedi>`_
(master branch should always be kind of stable/working).
Docs are available at `https://jedi.readthedocs.org/en/latest/
<https://jedi.readthedocs.org/en/latest/>`_. Pull requests with enhancements
and/or fixes are awesome and most welcome. Jedi uses `semantic versioning
<https://semver.org/>`_.
If you want to stay **up-to-date** with releases, please **subscribe** to this
mailing list: https://groups.google.com/g/jedi-announce. To subscribe you can
simply send an empty email to ``jedi-announce+subscribe@googlegroups.com``.
Issues & Questions
==================
You can file issues and questions in the `issue tracker
<https://github.com/davidhalter/jedi/>`_. Alternatively you can also ask on
`Stack Overflow <https://stackoverflow.com/questions/tagged/python-jedi>`_ with
the label ``python-jedi``.
Installation
============
`Check out the docs <https://jedi.readthedocs.org/en/latest/docs/installation.html>`_.
Features and Limitations
========================
Jedi's features are listed here:
`Features <https://jedi.readthedocs.org/en/latest/docs/features.html>`_.
You can run Jedi on Python 3.6+ but it should also
understand code that is older than those versions. Additionally you should be
able to use `Virtualenvs <https://jedi.readthedocs.org/en/latest/docs/api.html#environments>`_
very well.
Tips on how to use Jedi efficiently can be found `here
<https://jedi.readthedocs.org/en/latest/docs/features.html#recipes>`_.
API
---
You can find a comprehensive documentation for the
`API here <https://jedi.readthedocs.org/en/latest/docs/api.html>`_.
Autocompletion / Goto / Documentation
-------------------------------------
There are the following commands:
- ``jedi.Script.goto``
- ``jedi.Script.infer``
- ``jedi.Script.help``
- ``jedi.Script.complete``
- ``jedi.Script.get_references``
- ``jedi.Script.get_signatures``
- ``jedi.Script.get_context``
The returned objects are very powerful and are really all you might need.
Autocompletion in your REPL (IPython, etc.)
-------------------------------------------
Jedi is a dependency of IPython. Autocompletion in IPython with Jedi is
therefore possible without additional configuration.
Here is an `example video <https://vimeo.com/122332037>`_ how REPL completion
can look like.
For the ``python`` shell you can enable tab completion in a `REPL
<https://jedi.readthedocs.org/en/latest/docs/usage.html#tab-completion-in-the-python-shell>`_.
Static Analysis
---------------
For a lot of forms of static analysis, you can try to use
``jedi.Script(...).get_names``. It will return a list of names that you can
then filter and work with. There is also a way to list the syntax errors in a
file: ``jedi.Script.get_syntax_errors``.
Refactoring
-----------
Jedi supports the following refactorings:
- ``jedi.Script.inline``
- ``jedi.Script.rename``
- ``jedi.Script.extract_function``
- ``jedi.Script.extract_variable``
Code Search
-----------
There is support for module search with ``jedi.Script.search``, and project
search for ``jedi.Project.search``. The way to search is either by providing a
name like ``foo`` or by using dotted syntax like ``foo.bar``. Additionally you
can provide the API type like ``class foo.bar.Bar``. There are also the
functions ``jedi.Script.complete_search`` and ``jedi.Project.complete_search``.
Development
===========
There's a pretty good and extensive `development documentation
<https://jedi.readthedocs.org/en/latest/docs/development.html>`_.
Testing
=======
The test suite uses ``pytest``::
pip install pytest
If you want to test only a specific Python version (e.g. Python 3.8), it is as
easy as::
python3.8 -m pytest
For more detailed information visit the `testing documentation
<https://jedi.readthedocs.org/en/latest/docs/testing.html>`_.
Acknowledgements
================
Thanks a lot to all the
`contributors <https://jedi.readthedocs.org/en/latest/docs/acknowledgements.html>`_!
.. _jedi-vim: https://github.com/davidhalter/jedi-vim
.. _youcompleteme: https://github.com/ycm-core/YouCompleteMe
.. _deoplete-jedi: https://github.com/zchee/deoplete-jedi
.. _completor.vim: https://github.com/maralla/completor.vim
.. _Jedi.el: https://github.com/tkf/emacs-jedi
.. _company-mode: https://github.com/syohex/emacs-company-jedi
.. _elpy: https://github.com/jorgenschaefer/elpy
.. _anaconda-mode: https://github.com/proofit404/anaconda-mode
.. _ycmd: https://github.com/abingham/emacs-ycmd
.. _sublimejedi: https://github.com/srusskih/SublimeJEDI
.. _anaconda: https://github.com/DamnWidget/anaconda
.. _wdb: https://github.com/Kozea/wdb
.. _TextMate: https://github.com/lawrenceakka/python-jedi.tmbundle
.. _Kate: https://kate-editor.org
.. _Atom: https://atom.io/
.. _autocomplete-python-jedi: https://atom.io/packages/autocomplete-python-jedi
.. _GNOME Builder: https://wiki.gnome.org/Apps/Builder
.. _Visual Studio Code: https://code.visualstudio.com/
.. _gedi: https://github.com/isamert/gedi
.. _Eric IDE: https://eric-ide.python-projects.org

bundle/jedi-vim/pythonx/jedi/conftest.py vendored Normal file

@ -0,0 +1,172 @@
import tempfile
import shutil
import os
import sys
from functools import partial
import pytest
import jedi
from jedi.api.environment import get_system_environment, InterpreterEnvironment
from test.helpers import test_dir
collect_ignore = [
'setup.py',
'jedi/__main__.py',
'jedi/inference/compiled/subprocess/__main__.py',
'build/',
'test/examples',
'sith.py',
]
# The following hooks (pytest_configure, pytest_unconfigure) are used
# to modify `jedi.settings.cache_directory` because `clean_jedi_cache`
# has no effect during doctests. Without these hooks, doctests uses
# user's cache (e.g., ~/.cache/jedi/). We should remove this
# workaround once the problem is fixed in pytest.
#
# See:
# - https://github.com/davidhalter/jedi/pull/168
# - https://bitbucket.org/hpk42/pytest/issue/275/
jedi_cache_directory_orig = None
jedi_cache_directory_temp = None
def pytest_addoption(parser):
parser.addoption("--jedi-debug", "-D", action='store_true',
help="Enables Jedi's debug output.")
parser.addoption("--warning-is-error", action='store_true',
help="Warnings are treated as errors.")
parser.addoption("--env", action='store',
help="Execute the tests in that environment (e.g. 39 for python3.9).")
parser.addoption("--interpreter-env", "-I", action='store_true',
help="Don't use subprocesses to guarantee having safe "
"code execution. Useful for debugging.")
def pytest_configure(config):
global jedi_cache_directory_orig, jedi_cache_directory_temp
jedi_cache_directory_orig = jedi.settings.cache_directory
jedi_cache_directory_temp = tempfile.mkdtemp(prefix='jedi-test-')
jedi.settings.cache_directory = jedi_cache_directory_temp
if config.option.jedi_debug:
jedi.set_debug_function()
if config.option.warning_is_error:
import warnings
warnings.simplefilter("error")
def pytest_unconfigure(config):
global jedi_cache_directory_orig, jedi_cache_directory_temp
jedi.settings.cache_directory = jedi_cache_directory_orig
shutil.rmtree(jedi_cache_directory_temp)
@pytest.fixture(scope='session')
def clean_jedi_cache(request):
"""
Set `jedi.settings.cache_directory` to a temporary directory during test.
Note that you can't use built-in `tmpdir` and `monkeypatch`
fixture here because their scope is 'function', which is not used
in 'session' scope fixture.
This fixture is activated in ../pytest.ini.
"""
from jedi import settings
old = settings.cache_directory
tmp = tempfile.mkdtemp(prefix='jedi-test-')
settings.cache_directory = tmp
@request.addfinalizer
def restore():
settings.cache_directory = old
shutil.rmtree(tmp)
@pytest.fixture(scope='session')
def environment(request):
version = request.config.option.env
if version is None:
v = str(sys.version_info[0]) + str(sys.version_info[1])
version = os.environ.get('JEDI_TEST_ENVIRONMENT', v)
if request.config.option.interpreter_env or version == 'interpreter':
return InterpreterEnvironment()
if '.' not in version:
version = version[0] + '.' + version[1:]
return get_system_environment(version)
@pytest.fixture(scope='session')
def Script(environment):
return partial(jedi.Script, environment=environment)
@pytest.fixture(scope='session')
def ScriptWithProject(Script):
project = jedi.Project(test_dir)
return partial(jedi.Script, project=project)
@pytest.fixture(scope='session')
def get_names(Script):
return lambda code, **kwargs: Script(code).get_names(**kwargs)
@pytest.fixture(scope='session', params=['goto', 'infer'])
def goto_or_infer(request, Script):
return lambda code, *args, **kwargs: getattr(Script(code), request.param)(*args, **kwargs)
@pytest.fixture(scope='session', params=['goto', 'help'])
def goto_or_help(request, Script):
return lambda code, *args, **kwargs: getattr(Script(code), request.param)(*args, **kwargs)
@pytest.fixture(scope='session', params=['goto', 'help', 'infer'])
def goto_or_help_or_infer(request, Script):
def do(code, *args, **kwargs):
return getattr(Script(code), request.param)(*args, **kwargs)
do.type = request.param
return do
@pytest.fixture(scope='session', params=['goto', 'complete', 'help'])
def goto_or_complete(request, Script):
return lambda code, *args, **kwargs: getattr(Script(code), request.param)(*args, **kwargs)
@pytest.fixture(scope='session')
def has_django(environment):
script = jedi.Script('import django', environment=environment)
return bool(script.infer())
@pytest.fixture(scope='session')
def jedi_path():
return os.path.dirname(__file__)
@pytest.fixture()
def skip_pre_python38(environment):
if environment.version_info < (3, 8):
# This if is just needed to avoid that tests ever skip way more than
# they should for all Python versions.
pytest.skip()
@pytest.fixture()
def skip_pre_python37(environment):
if environment.version_info < (3, 7):
# This if is just needed to avoid that tests ever skip way more than
# they should for all Python versions.
pytest.skip()


@ -0,0 +1,53 @@
#!/usr/bin/env bash
# The script creates a separate folder in build/ and creates tags there, pushes
# them and then uploads the package to PyPI.
set -eu -o pipefail
BASE_DIR=$(dirname $(readlink -f "$0"))
cd $BASE_DIR
git fetch --tags
PROJECT_NAME=jedi
BRANCH=master
BUILD_FOLDER=build
[ -d $BUILD_FOLDER ] || mkdir $BUILD_FOLDER
# Remove the previous deployment first.
# Checkout the right branch
cd $BUILD_FOLDER
rm -rf $PROJECT_NAME
git clone .. $PROJECT_NAME
cd $PROJECT_NAME
git checkout $BRANCH
git submodule update --init
# Test first.
pytest
# Create tag
tag=v$(python3 -c "import $PROJECT_NAME; print($PROJECT_NAME.__version__)")
master_ref=$(git show-ref -s heads/$BRANCH)
tag_ref=$(git show-ref -s $tag || true)
if [[ $tag_ref ]]; then
if [[ $tag_ref != $master_ref ]]; then
echo 'Cannot tag something that has already been tagged with another commit.'
exit 1
fi
else
git tag -a $tag
git push --tags
fi
# Package and upload to PyPI
#rm -rf dist/ - Not needed anymore, because the folder is never reused.
echo `pwd`
python3 setup.py sdist bdist_wheel
# Maybe do a pip install twine before.
twine upload dist/*
cd $BASE_DIR
# The tags have been pushed to this repo. Push the tags to github, now.
git push --tags


@ -0,0 +1,153 @@
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " text to make text files"
@echo " man to make manual pages"
@echo " texinfo to make Texinfo files"
@echo " info to make Texinfo files and run them through makeinfo"
@echo " gettext to make PO message catalogs"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
clean:
-rm -rf $(BUILDDIR)/*
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Jedi.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Jedi.qhc"
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/Jedi"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Jedi"
@echo "# devhelp"
epub:
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
latexpdf:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
@echo "Run \`make' in that directory to run these through makeinfo" \
"(use \`make info' here to do that automatically)."
info:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo "Running Texinfo files through makeinfo..."
make -C $(BUILDDIR)/texinfo info
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext:
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
@echo
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."


@ -0,0 +1,14 @@
Installation
------------
Install the graphviz library::
sudo apt-get install graphviz
Install sphinx::
sudo pip install sphinx
You might also need to install the Python graphviz interface::
sudo pip install graphviz


@ -0,0 +1,9 @@
div.version {
color: black !important;
margin-top: -1.2em !important;
margin-bottom: .6em !important;
}
div.wy-side-nav-search {
padding-top: 0 !important;
}


@ -0,0 +1,3 @@
The source of the logo is a photoshop file hosted here:
https://dl.dropboxusercontent.com/u/170011615/Jedi12_Logo.psd.xz


@ -0,0 +1,4 @@
<h3>Github</h3>
<iframe src="https://ghbtns.com/github-btn.html?user=davidhalter&repo=jedi&type=watch&count=true&size=large"
frameborder="0" scrolling="0" width="170" height="30" allowtransparency="true"></iframe>
<br><br>


@ -0,0 +1,3 @@
<p class="logo"><a href="{{ pathto(master_doc) }}">
<img class="logo" src="{{ pathto('_static/logo.png', 1) }}" alt="Logo"/>
</a></p>

View File

@ -0,0 +1,294 @@
# Jedi documentation build configuration file, created by
# sphinx-quickstart on Wed Dec 26 00:11:34 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.todo',
'sphinx.ext.intersphinx', 'sphinx.ext.inheritance_diagram',
'sphinx_rtd_theme', 'sphinx.ext.autosummary']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Jedi'
copyright = 'jedi contributors'
import jedi
from jedi.utils import version_info
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y.Z version.
version = '.'.join(str(x) for x in version_info()[:3])
# The full version, including alpha/beta/rc tags.
release = jedi.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'logo_only': True,
'style_nav_header_background': 'white',
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['custom_style.css']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'sidebarlogo.html',
'localtoc.html',
#'relations.html',
'ghbuttons.html',
#'sourcelink.html',
'searchbox.html'
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Jedidoc'
#html_style = 'default.css' # Force usage of default template on RTD
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Jedi.tex', 'Jedi Documentation',
'Jedi contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'jedi', 'Jedi Documentation',
['Jedi contributors'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Jedi', 'Jedi Documentation',
'Jedi contributors', 'Jedi', 'Awesome Python autocompletion library.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for todo module ---------------------------------------------------
todo_include_todos = False
# -- Options for autodoc module ------------------------------------------------
autoclass_content = 'both'
autodoc_member_order = 'bysource'
autodoc_default_flags = []
#autodoc_default_flags = ['members', 'undoc-members']
# -- Options for intersphinx module --------------------------------------------
intersphinx_mapping = {
'python': ('https://docs.python.org/', None),
'parso': ('https://parso.readthedocs.io/en/latest/', None),
}
def skip_deprecated(app, what, name, obj, skip, options):
"""
All attributes containing a deprecated note shouldn't be documented
anymore. This makes it even clearer that they are not supported anymore.
"""
doc = obj.__doc__
return skip or doc and '.. deprecated::' in doc
def setup(app):
app.connect('autodoc-skip-member', skip_deprecated)

View File

@ -0,0 +1,66 @@
.. include:: global.rst
History & Acknowledgements
==========================
Acknowledgements
----------------
- Dave Halter for creating and maintaining Jedi & Parso.
- Takafumi Arakaki (@tkf) for creating a solid test environment and a lot of
other things.
- Danilo Bargen (@dbrgn) for general housekeeping and being a good friend :).
- Guido van Rossum (@gvanrossum) for creating the parser generator pgen2
(originally used in lib2to3).
- Thanks to all the :ref:`contributors <contributors>`.
A Little Bit of History
-----------------------
Written by Dave.
The Star Wars Jedi are awesome. My Jedi software tries to imitate a little bit
of the precognition the Jedi have. There's even an awesome `scene
<https://youtu.be/yHRJLIf7wMU>`_ of Monty Python Jedis :-).
But actually the name has not much to do with Star Wars. It's part of my
second name Jedidjah.
I actually started Jedi back in 2012, because there were no good solutions
available for VIM. Most auto-completion solutions just did not work well. The
only good solution was PyCharm. But I liked my good old VIM very much. There
was also a solution called Rope that did not work at all for me. So I decided
to write my own version of a completion engine.
The first idea was to execute non-dangerous code. But I soon realized that
this would not work. So I started to build a static analysis tool.
The biggest problem that I had at the time was that I did not know a thing
about parsers. I did not even know the term static analysis. It turns out
parsers are the foundation of a good static analysis tool. I of course did not
know that and tried to write my own poor version of a parser that I ended up
throwing away two years later.
Because of my lack of knowledge, everything after 2012 and before 2020 was
basically refactoring. I rewrote the core parts of Jedi probably like 5-10
times. The last big rewrite (that I did twice) was the inclusion of
gradual typing and stubs.
I learned during that time that it is crucial to have a good understanding of
your problem. Otherwise you just end up doing it again. I only wrote features
in the beginning and in the end. Everything else was bugfixing and refactoring.
However, now I am really happy with the result. It works well, bugfixes can be
quick and it is pretty much feature complete.
--------
I will leave you with a small anecdote that happened in 2012, if I remember
correctly. After I explained to Guido van Rossum how some parts of my
auto-completion work, he said:
*"Oh, that worries me..."*
Now that it is finished, I hope he likes it :-).
.. _contributors:
.. include:: ../../AUTHORS.txt

View File

@ -0,0 +1,53 @@
.. include:: ../global.rst
.. _api-classes:
API Return Classes
------------------
Abstract Base Class
~~~~~~~~~~~~~~~~~~~
.. autoclass:: jedi.api.classes.BaseName
:members:
:show-inheritance:
Name
~~~~
.. autoclass:: jedi.api.classes.Name
:members:
:show-inheritance:
Completion
~~~~~~~~~~
.. autoclass:: jedi.api.classes.Completion
:members:
:show-inheritance:
BaseSignature
~~~~~~~~~~~~~
.. autoclass:: jedi.api.classes.BaseSignature
:members:
:show-inheritance:
Signature
~~~~~~~~~
.. autoclass:: jedi.api.classes.Signature
:members:
:show-inheritance:
ParamName
~~~~~~~~~
.. autoclass:: jedi.api.classes.ParamName
:members:
:show-inheritance:
Refactoring
~~~~~~~~~~~
.. autoclass:: jedi.api.refactoring.Refactoring
:members:
:show-inheritance:
.. autoclass:: jedi.api.errors.SyntaxError
:members:
:show-inheritance:

View File

@ -0,0 +1,173 @@
.. include:: ../global.rst
API Overview
============
.. note:: This documentation is mostly for plugin developers, who want to
improve their editors/IDEs with Jedi.
.. _api:
The API consists of a few different parts:
- The main starting points for complete/goto: :class:`.Script` and
:class:`.Interpreter`. If you work with Jedi you want to understand these
classes first.
- :ref:`API Result Classes <api-classes>`
- :ref:`Python Versions/Virtualenv Support <environments>` with functions like
:func:`.find_system_environments` and :func:`.find_virtualenvs`
- A way to work with different :ref:`Folders / Projects <projects>`
- Helpful functions: :func:`.preload_module` and :func:`.set_debug_function`
The methods that you are most likely going to use to work with Jedi are the
following ones:
.. currentmodule:: jedi
.. autosummary::
:nosignatures:
Script.complete
Script.goto
Script.infer
Script.help
Script.get_signatures
Script.get_references
Script.get_context
Script.get_names
Script.get_syntax_errors
Script.rename
Script.inline
Script.extract_variable
Script.extract_function
Script.search
Script.complete_search
Project.search
Project.complete_search
Script
------
.. autoclass:: jedi.Script
:members:
Interpreter
-----------
.. autoclass:: jedi.Interpreter
:members:
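A minimal sketch of how :class:`.Interpreter` can be used with a live
namespace (the variable name ``some_list`` is only an illustration):

.. sourcecode:: python

    import jedi

    # Complete attributes of an object that already exists in the namespace.
    some_list = [1, 2, 3]
    script = jedi.Interpreter('some_list.app', [{'some_list': some_list}])
    print([c.name for c in script.complete()])  # e.g. ['append']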
.. _projects:
Projects
--------
.. automodule:: jedi.api.project
.. autofunction:: jedi.get_default_project
.. autoclass:: jedi.Project
:members:
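A small sketch of creating and searching a project; ``/path/to/repo`` below is
just a placeholder for a real project folder:

.. sourcecode:: python

    import jedi

    # get_default_project walks upwards from the given path to find the
    # project root (e.g. a folder containing setup.py or .git).
    project = jedi.get_default_project('/path/to/repo')
    for name in project.search('Script'):
        print(name.module_path, name.line)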
.. _environments:
Environments
------------
.. automodule:: jedi.api.environment
.. autofunction:: jedi.find_system_environments
.. autofunction:: jedi.find_virtualenvs
.. autofunction:: jedi.get_system_environment
.. autofunction:: jedi.create_environment
.. autofunction:: jedi.get_default_environment
.. autoexception:: jedi.InvalidPythonEnvironment
.. autoclass:: jedi.api.environment.Environment
:members:
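As a rough sketch, assuming at least one virtualenv exists below the current
directory:

.. sourcecode:: python

    import jedi

    # List the virtualenvs that Jedi can find below the working directory.
    for env in jedi.find_virtualenvs(paths=['.']):
        print(env.executable, env.version_info)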
Helper Functions
----------------
.. autofunction:: jedi.preload_module
.. autofunction:: jedi.set_debug_function
Errors
------
.. autoexception:: jedi.InternalError
.. autoexception:: jedi.RefactoringError
Examples
--------
Completions
~~~~~~~~~~~
.. sourcecode:: python
>>> import jedi
>>> code = '''import json; json.l'''
>>> script = jedi.Script(code, path='example.py')
>>> script
<Script: 'example.py' <SameEnvironment: 3.9.0 in /usr>>
>>> completions = script.complete(1, 19)
>>> completions
[<Completion: load>, <Completion: loads>]
>>> completions[1]
<Completion: loads>
>>> completions[1].complete
'oads'
>>> completions[1].name
'loads'
Type Inference / Goto
~~~~~~~~~~~~~~~~~~~~~
.. sourcecode:: python
>>> import jedi
>>> code = '''\
... def my_func():
... print 'called'
...
... alias = my_func
... my_list = [1, None, alias]
... inception = my_list[2]
...
... inception()'''
>>> script = jedi.Script(code)
>>>
>>> script.goto(8, 1)
[<Name full_name='__main__.inception', description='inception = my_list[2]'>]
>>>
>>> script.infer(8, 1)
[<Name full_name='__main__.my_func', description='def my_func'>]
References
~~~~~~~~~~
.. sourcecode:: python
>>> import jedi
>>> code = '''\
... x = 3
... if 1 == 2:
... x = 4
... else:
... del x'''
>>> script = jedi.Script(code)
>>> rns = script.get_references(5, 8)
>>> rns
[<Name full_name='__main__.x', description='x = 3'>,
<Name full_name='__main__.x', description='x = 4'>,
<Name full_name='__main__.x', description='del x'>]
>>> rns[1].line
3
>>> rns[1].column
4
Deprecations
------------
The deprecation process is as follows:
1. A deprecation is announced in any release.
2. The next major release removes the deprecated functionality.

View File

@ -0,0 +1 @@
.. include:: ../../CHANGELOG.rst

View File

@ -0,0 +1,219 @@
.. include:: ../global.rst
Jedi Development
================
.. currentmodule:: jedi
.. note:: This documentation is for Jedi developers who want to improve Jedi
itself, but have no idea how Jedi works. If you want to use Jedi for
your IDE, look at the `plugin api <api.html>`_.
It is also important to note that this documentation is pretty old and some
things might not apply anymore.
Introduction
------------
This page tries to address the fundamental demand for documentation of the
|jedi| internals. Understanding a dynamic language is a complex task, especially
because type inference in Python can be a very recursive task. Therefore |jedi|
couldn't get rid of complexity. I know that **simple is better than complex**,
but unfortunately it sometimes requires complex solutions to understand complex
systems.
In six chapters I'm trying to describe the internals of |jedi|:
- :ref:`The Jedi Core <core>`
- :ref:`Core Extensions <core-extensions>`
- :ref:`Imports & Modules <imports-modules>`
- :ref:`Stubs & Annotations <stubs>`
- :ref:`Caching & Recursions <caching-recursions>`
- :ref:`Helper modules <dev-helpers>`
.. note:: Testing is not documented here, you'll find that
`right here <testing.html>`_.
.. _core:
The Jedi Core
-------------
The core of Jedi consists of three parts:
- :ref:`Parser <parser>`
- :ref:`Python type inference <inference>`
- :ref:`API <dev-api>`
Most people are probably interested in :ref:`type inference <inference>`,
because that's where all the magic happens. I need to introduce the :ref:`parser
<parser>` first, because :mod:`jedi.inference` uses it extensively.
.. _parser:
Parser
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Jedi used to have its internal parser, however this is now a separate project
and is called `parso <http://parso.readthedocs.io>`_.
The parser creates a syntax tree that |jedi| analyses and tries to understand.
The grammar that this parser uses is very similar to the official Python
`grammar files <https://docs.python.org/3/reference/grammar.html>`_.
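A tiny sketch (not Jedi code, just an illustration) of what parso hands back::

    import parso

    # Parse a small module and look at the function definitions in the tree.
    module = parso.parse('def foo(bar):\n    return bar\n')
    funcdef = next(module.iter_funcdefs())
    print(funcdef.name.value)  # 'foo'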
.. _inference:
Type inference of python code (inference/__init__.py)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: jedi.inference
Inference Values (inference/base_value.py)
++++++++++++++++++++++++++++++++++++++++++++++++++++++
.. automodule:: jedi.inference.base_value
.. inheritance-diagram::
jedi.inference.value.instance.TreeInstance
jedi.inference.value.klass.ClassValue
jedi.inference.value.function.FunctionValue
jedi.inference.value.function.FunctionExecutionContext
:parts: 1
.. _name_resolution:
Name resolution (inference/finder.py)
+++++++++++++++++++++++++++++++++++++
.. automodule:: jedi.inference.finder
.. _dev-api:
API (api/__init__.py and api/classes.py)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The API has been designed to be as easy to use as possible. The API
documentation can be found `here <api.html>`_. The API itself contains
little code that needs to be mentioned here. Generally I'm trying to be
conservative with the API. I'd rather not add new API features if they are not
necessary, because it's much harder to deprecate stuff than to add it later.
.. _core-extensions:
Core Extensions
---------------
Core Extensions is a summary of the following topics:
- :ref:`Iterables & Dynamic Arrays <iterables>`
- :ref:`Dynamic Parameters <dynamic_params>`
- :ref:`Docstrings <docstrings>`
- :ref:`Refactoring <refactoring>`
These topics are very important for understanding what Jedi additionally does,
but they could be removed from Jedi and Jedi would still work, just slower and
without some features.
.. _iterables:
Iterables & Dynamic Arrays (inference/value/iterable.py)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To understand Python on a deeper level, |jedi| needs to understand some of the
dynamic features of Python like lists that are filled after creation:
.. automodule:: jedi.inference.value.iterable
.. _dynamic_params:
Parameter completion (inference/dynamic_params.py)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: jedi.inference.dynamic_params
.. _docstrings:
Docstrings (inference/docstrings.py)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: jedi.inference.docstrings
.. _refactoring:
Refactoring (api/refactoring.py)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: jedi.api.refactoring
.. _imports-modules:
Imports & Modules
-----------------
- :ref:`Modules <modules>`
- :ref:`Builtin Modules <builtin>`
- :ref:`Imports <imports>`
.. _builtin:
Compiled Modules (inference/compiled.py)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: jedi.inference.compiled
.. _imports:
Imports (inference/imports.py)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: jedi.inference.imports
.. _stubs:
Stubs & Annotations (inference/gradual)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: jedi.inference.gradual
.. _caching-recursions:
Caching & Recursions
--------------------
- :ref:`Caching <cache>`
- :ref:`Recursions <recursion>`
.. _cache:
Caching (cache.py)
~~~~~~~~~~~~~~~~~~
.. automodule:: jedi.cache
.. _recursion:
Recursions (recursion.py)
~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: jedi.inference.recursion
.. _dev-helpers:
Helper Modules
--------------
Most other modules are not really central to how Jedi works. They all contain
relevant code, but if you understand the modules above, you pretty much
understand Jedi.

View File

@ -0,0 +1,110 @@
.. include:: ../global.rst
Features and Limitations
========================
Jedi's main API calls and features are:
- Autocompletion: :meth:`.Script.complete`; It's also possible to get it
working in :ref:`your REPL (IPython, etc.) <repl-completion>`
- Goto/Type Inference: :meth:`.Script.goto` and :meth:`.Script.infer`
- Static Analysis: :meth:`.Script.get_names` and :meth:`.Script.get_syntax_errors`
- Refactorings: :meth:`.Script.rename`, :meth:`.Script.inline`,
:meth:`.Script.extract_variable` and :meth:`.Script.extract_function`
- Code Search: :meth:`.Script.search` and :meth:`.Project.search`
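A very small sketch of the most common call; the file name is only an example::

    import jedi

    code = 'import json\njson.lo'
    script = jedi.Script(code, path='example.py')
    # Complete at the end of line 2 (lines are 1-based, columns 0-based).
    print([c.name for c in script.complete(2, len('json.lo'))])  # e.g. ['load', 'loads']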
Basic Features
--------------
- Python 3.6+ support
- Ignores syntax errors and wrong indentation
- Can deal with complex module / function / class structures
- Great ``virtualenv``/``venv`` support
- Works great with Python's :ref:`type hinting <type-hinting>`,
- Understands stub files
- Can infer function arguments for sphinx, epydoc and basic numpydoc docstrings
- Is overall a very solid piece of software that has been refined for a long
time. Bug reports are very welcome and are usually fixed within a few weeks.
Supported Python Features
-------------------------
|jedi| supports many of the widely used Python features:
- builtins
- returns, yields, yield from
- tuple assignments / array indexing / dictionary indexing / star unpacking
- with-statement / exception handling
- ``*args`` / ``**kwargs``
- decorators / lambdas / closures
- generators / iterators
- descriptors: property / staticmethod / classmethod / custom descriptors
- some magic methods: ``__call__``, ``__iter__``, ``__next__``, ``__get__``,
``__getitem__``, ``__init__``
- ``list.append()``, ``set.add()``, ``list.extend()``, etc.
- (nested) list comprehensions / ternary expressions
- relative imports
- ``getattr()`` / ``__getattr__`` / ``__getattribute__``
- function annotations
- simple/typical ``sys.path`` modifications
- ``isinstance`` checks for if/while/assert
- namespace packages (includes ``pkgutil``, ``pkg_resources`` and PEP420 namespaces)
- Django / Flask / Buildout support
- Understands Pytest fixtures
Limitations
-----------
In general Jedi's limits are quite high, but for very big projects or very
complex code, Jedi sometimes intentionally stops type inference to avoid
hanging for a long time.
Additionally there are some Python patterns Jedi does not support. This is
intentional and below should be a complete list:
- Arbitrary metaclasses: Some metaclasses like enums and dataclasses are
reimplemented in Jedi to make them work. Most of the time stubs are good
enough to get type inference working, even when metaclasses are involved.
- ``setattr()``, ``__import__()``
- Writing to some dicts: ``globals()``, ``locals()``, ``object.__dict__``
- Manipulations of instances outside the instance variables without using
methods
Performance Issues
~~~~~~~~~~~~~~~~~~
Importing ``numpy`` can be quite slow sometimes, as well as loading the
builtins the first time. If you want to speed things up, you could preload
libraries in |jedi| with :func:`.preload_module`. However, once loaded, this
should not be a problem anymore. The same is true for huge modules like
``PySide``, ``wx``, ``tensorflow``, ``pandas``, etc.
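For example, a plugin could warm things up at startup roughly like this (the
module names are just examples)::

    import jedi

    # Parse these heavyweight modules once now, so later completions are fast.
    jedi.preload_module('numpy', 'pandas')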
Jedi does not have a very good cache layer. This is probably the biggest and
only architectural `issue <https://github.com/davidhalter/jedi/issues/1059>`_ in
Jedi. Unfortunately it is not easy to change that. Dave Halter is thinking
about rewriting Jedi in Rust, but it has taken Jedi more than 8 years to reach
version 1.0, so a rewrite will probably also take years.
Security
--------
For :class:`.Script`
~~~~~~~~~~~~~~~~~~~~
Security is an important topic for |jedi|. By default, no code is executed
within Jedi. As long as you write pure Python, everything is inferred
statically. If you enable ``load_unsafe_extensions=True`` for your
:class:`.Project` and you use builtin modules (``c_builtin``) Jedi will execute
those modules. If you don't trust a code base, please do not enable that
option. It might lead to arbitrary code execution.
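A hedged sketch of keeping that option explicitly disabled for an untrusted
code base (the path is a placeholder)::

    import jedi

    # load_unsafe_extensions defaults to False; passing it explicitly documents
    # the intent that no compiled modules from this project get executed.
    project = jedi.Project('/path/to/untrusted/repo', load_unsafe_extensions=False)
    script = jedi.Script('import os\nos.pa', project=project)
    print([c.name for c in script.complete()])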
For :class:`.Interpreter`
~~~~~~~~~~~~~~~~~~~~~~~~~
If you want security for :class:`.Interpreter`, ``do not`` use it. Jedi does
execute properties and in general is not very careful to avoid code execution.
This is intentional: Most people trust the code bases they have imported,
because at that point a malicious code base would have had code execution
already.

View File

@ -0,0 +1,88 @@
.. include:: ../global.rst
Installation and Configuration
==============================
.. warning:: Most people will want to install Jedi as a submodule/vendored and
not through pip/system wide. The reason for this is that it makes sense that
the plugin that uses Jedi always has access to it. Otherwise Jedi will not
work properly when virtualenvs are activated. So please read the
documentation of your editor/IDE plugin to install Jedi.
For plugin developers, Jedi works best if it is always available. Vendoring
is a pretty good option for that.
You can either include |jedi| as a submodule in your text editor plugin (like
jedi-vim_ does by default), or you can install it systemwide.
.. note:: This just installs the |jedi| library, not the :ref:`editor plugins
<editor-plugins>`. For information about how to make it work with your
editor, refer to the corresponding documentation.
The normal way
--------------
Most people use Jedi with an :ref:`editor plugin <editor-plugins>`. Typically
you install Jedi by installing an editor plugin; no further steps are needed.
Just take a look at the instructions for the plugin.
With pip
--------
On any system you can install |jedi| directly from the Python package index
using pip::
sudo pip install jedi
If you want to install the current development version (master branch)::
sudo pip install -e git://github.com/davidhalter/jedi.git#egg=jedi
System-wide installation via a package manager
----------------------------------------------
Arch Linux
~~~~~~~~~~
You can install |jedi| directly from official Arch Linux packages:
- `python-jedi <https://www.archlinux.org/packages/community/any/python-jedi/>`__
(There is also a packaged version of the vim plugin available:
`vim-jedi at Arch Linux <https://www.archlinux.org/packages/community/any/vim-jedi/>`__.)
Debian
~~~~~~
Debian packages are available in the `unstable repository
<https://packages.debian.org/search?keywords=python%20jedi>`__.
Others
~~~~~~
We are in the discussion of adding |jedi| to the Fedora repositories.
Manual installation from GitHub
---------------------------------------------
If you prefer not to use an automated package installer, you can clone the source from GitHub and install it manually. To install it, run these commands::
git clone --recurse-submodules https://github.com/davidhalter/jedi
cd jedi
sudo python setup.py install
Inclusion as a submodule
------------------------
If you use an editor plugin like jedi-vim_, you can simply include |jedi| as a
git submodule of the plugin directory. Vim plugin managers like Vundle_ or
Pathogen_ make it very easy to keep submodules up to date.
.. _jedi-vim: https://github.com/davidhalter/jedi-vim
.. _vundle: https://github.com/gmarik/vundle
.. _pathogen: https://github.com/tpope/vim-pathogen

View File

@ -0,0 +1,6 @@
.. include:: ../global.rst
Settings
========
.. automodule:: jedi.settings

View File

@ -0,0 +1,36 @@
.. include:: ../global.rst
Jedi Testing
============
The test suite depends on ``pytest``::
pip install pytest
If you want to test only a specific Python version (e.g. Python 3.8), it is as
easy as::
python3.8 -m pytest
Tests are also run automatically on `GitHub Actions
<https://github.com/davidhalter/jedi/actions>`_.
You want to add a test for |jedi|? Great! We love that. Normally you should
write your tests as :ref:`Blackbox Tests <blackbox>`. Most tests would
fit right in there.
For specific API testing we're using simple unit tests, with a focus on a
simple and readable testing structure.
.. _blackbox:
Integration Tests (run.py)
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: test.run
Refactoring Tests (refactor.py)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: test.refactor

View File

@ -0,0 +1,275 @@
.. include:: ../global.rst
Using Jedi
==========
|jedi| can be used with a variety of :ref:`plugins <editor-plugins>`,
:ref:`language servers <language-servers>` and other software.
It is also possible to use |jedi| in the :ref:`Python shell or with IPython
<repl-completion>`.
Below you can also find a list of :ref:`recipes for type hinting <recipes>`.
.. _language-servers:
Language Servers
----------------
- `jedi-language-server <https://github.com/pappasam/jedi-language-server>`_
- `python-language-server <https://github.com/palantir/python-language-server>`_
- `anakin-language-server <https://github.com/muffinmad/anakin-language-server>`_
.. _editor-plugins:
Editor Plugins
--------------
Vim
~~~
- jedi-vim_
- YouCompleteMe_
- deoplete-jedi_
Visual Studio Code
~~~~~~~~~~~~~~~~~~
- `Python Extension`_
Emacs
~~~~~
- Jedi.el_
- elpy_
- anaconda-mode_
Sublime Text 2/3
~~~~~~~~~~~~~~~~
- SublimeJEDI_ (ST2 & ST3)
- anaconda_ (only ST3)
SynWrite
~~~~~~~~
- SynJedi_
TextMate
~~~~~~~~
- Textmate_ (Not sure if it's actually working)
Kate
~~~~
- Kate_ version 4.13+ `supports it natively
<https://projects.kde.org/projects/kde/applications/kate/repository/entry/addons/kate/pate/src/plugins/python_autocomplete_jedi.py?rev=KDE%2F4.13>`__,
you have to enable it, though.
Atom
~~~~
- autocomplete-python-jedi_
GNOME Builder
~~~~~~~~~~~~~
- `GNOME Builder`_ `supports it natively
<https://git.gnome.org/browse/gnome-builder/tree/plugins/jedi>`__,
and is enabled by default.
Gedit
~~~~~
- gedi_
Eric IDE
~~~~~~~~
- `Eric IDE`_ (Available as a plugin)
Web Debugger
~~~~~~~~~~~~
- wdb_
xonsh shell
~~~~~~~~~~~
Jedi is a preinstalled extension in `xonsh shell <https://xon.sh/contents.html>`_.
Run the following command to enable:
::
xontrib load jedi
and many more!
.. _repl-completion:
Tab Completion in the Python Shell
----------------------------------
Jedi is a dependency of IPython. Autocompletion in IPython is therefore
possible without additional configuration.
Here is an `example video <https://vimeo.com/122332037>`_ of how REPL
completion can look in a different shell.
There are two different options for using Jedi autocompletion in your
``python`` interpreter: one with a custom ``$HOME/.pythonrc.py`` file and one
that uses ``PYTHONSTARTUP``.
Using ``PYTHONSTARTUP``
~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: jedi.api.replstartup
Using a Custom ``$HOME/.pythonrc.py``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autofunction:: jedi.utils.setup_readline
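A minimal ``$HOME/.pythonrc.py`` could look roughly like this (the printed
message is just an example)::

    # ~/.pythonrc.py
    try:
        from jedi.utils import setup_readline
    except ImportError:
        print('jedi is not installed, no Jedi tab completion')
    else:
        setup_readline()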
.. _recipes:
Recipes
-------
Here are some tips on how to use |jedi| efficiently.
.. _type-hinting:
Type Hinting
~~~~~~~~~~~~
If |jedi| cannot detect the type of a function argument correctly (due to the
dynamic nature of Python), you can help it by hinting the type using
one of the docstring/annotation styles below. **Only gradual typing will
always work**; all the docstring solutions are glorified hacks and more
complicated cases will probably not work.
Official Gradual Typing (Recommended)
+++++++++++++++++++++++++++++++++++++
You can read a lot about Python's gradual typing system in the corresponding
PEPs like:
- `PEP 484 <https://www.python.org/dev/peps/pep-0484/>`_ as an introduction
- `PEP 526 <https://www.python.org/dev/peps/pep-0526/>`_ for variable annotations
- `PEP 589 <https://www.python.org/dev/peps/pep-0589/>`_ for ``TypedDict``
- There are probably more :)
Below you can find a few examples how you can use this feature.
Function annotations::
def myfunction(node: ProgramNode, foo: str) -> None:
"""Do something with a ``node``.
"""
node.| # complete here
Assignment, for-loop and with-statement type hints::
import typing
x: int = foo()
y: typing.Optional[int] = 3
key: str
value: Employee
for key, value in foo.items():
pass
f: Union[int, float]
with foo() as f:
print(f + 3)
PEP-0484 should be supported in its entirety. Feel free to open issues if that
is not the case. You can also use stub files.
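For example, a hypothetical ``utils.pyi`` next to ``utils.py`` is enough for
Jedi to pick up the annotated signature::

    # utils.pyi -- hypothetical stub file, names chosen only for illustration
    def parse_id(raw: str) -> int: ...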
Sphinx style
++++++++++++
http://www.sphinx-doc.org/en/stable/domains.html#info-field-lists
::
def myfunction(node, foo):
"""
Do something with a ``node``.
:type node: ProgramNode
:param str foo: foo parameter description
"""
node.| # complete here
Epydoc
++++++
http://epydoc.sourceforge.net/manual-fields.html
::
def myfunction(node):
"""
Do something with a ``node``.
@type node: ProgramNode
"""
node.| # complete here
Numpydoc
++++++++
https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
In order to support the numpydoc format, you need to install the `numpydoc
<https://pypi.python.org/pypi/numpydoc>`__ package.
::
def foo(var1, var2, long_var_name='hi'):
r"""
A one-line summary that does not use variable names or the
function name.
...
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists,
etc. -- that can be converted to an array. We can also
refer to variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
long_var_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
...
"""
var2.| # complete here
.. _jedi-vim: https://github.com/davidhalter/jedi-vim
.. _youcompleteme: https://valloric.github.io/YouCompleteMe/
.. _deoplete-jedi: https://github.com/zchee/deoplete-jedi
.. _Jedi.el: https://github.com/tkf/emacs-jedi
.. _elpy: https://github.com/jorgenschaefer/elpy
.. _anaconda-mode: https://github.com/proofit404/anaconda-mode
.. _sublimejedi: https://github.com/srusskih/SublimeJEDI
.. _anaconda: https://github.com/DamnWidget/anaconda
.. _SynJedi: http://uvviewsoft.com/synjedi/
.. _wdb: https://github.com/Kozea/wdb
.. _TextMate: https://github.com/lawrenceakka/python-jedi.tmbundle
.. _kate: https://kate-editor.org/
.. _autocomplete-python-jedi: https://atom.io/packages/autocomplete-python-jedi
.. _GNOME Builder: https://wiki.gnome.org/Apps/Builder/
.. _gedi: https://github.com/isamert/gedi
.. _Eric IDE: https://eric-ide.python-projects.org
.. _Python Extension: https://marketplace.visualstudio.com/items?itemName=ms-python.python

View File

@ -0,0 +1,3 @@
:orphan:
.. |jedi| replace:: Jedi

View File

@ -0,0 +1,72 @@
.. include:: global.rst
.. meta::
:github_url: https://github.com/davidhalter/jedi
Jedi - an awesome autocompletion, static analysis and refactoring library for Python
====================================================================================
.. image:: https://img.shields.io/github/stars/davidhalter/jedi.svg?style=social&label=Star&maxAge=2592000
:target: https://github.com/davidhalter/jedi
:alt: GitHub stars
.. image:: http://isitmaintained.com/badge/open/davidhalter/jedi.svg
:target: https://github.com/davidhalter/jedi/issues
:alt: The percentage of open issues and pull requests
.. image:: http://isitmaintained.com/badge/resolution/davidhalter/jedi.svg
:target: https://github.com/davidhalter/jedi/issues
:alt: The resolution time is the median time an issue or pull request stays open.
.. image:: https://github.com/davidhalter/jedi/workflows/ci/badge.svg?branch=master
:target: https://github.com/davidhalter/jedi/actions
:alt: Tests
.. image:: https://coveralls.io/repos/davidhalter/jedi/badge.svg?branch=master
:target: https://coveralls.io/r/davidhalter/jedi
:alt: Coverage status
.. image:: https://pepy.tech/badge/jedi
:target: https://pepy.tech/project/jedi
:alt: PyPI Downloads
`Github Repository <https://github.com/davidhalter/jedi>`_
.. automodule:: jedi
Autocompletion can for example look like this in jedi-vim:
.. figure:: _screenshots/screenshot_complete.png
.. _toc:
Docs
----
.. toctree::
:maxdepth: 1
docs/usage
docs/features
docs/api
docs/api-classes
docs/installation
docs/settings
docs/development
docs/testing
docs/acknowledgements
docs/changelog
.. _resources:
Resources
---------
If you want to stay **up-to-date** with releases, please **subscribe** to this
mailing list: https://groups.google.com/g/jedi-announce. To subscribe you can
simply send an empty email to ``jedi-announce+subscribe@googlegroups.com``.
- `Source Code on Github <https://github.com/davidhalter/jedi>`_
- `Python Package Index <https://pypi.python.org/pypi/jedi/>`_

View File

@ -0,0 +1,42 @@
"""
Jedi is a static analysis tool for Python that is typically used in
IDE/editor plugins. Jedi has a focus on autocompletion and goto
functionality. Other features include refactoring, code search and finding
references.
Jedi has a simple API to work with. There is a reference implementation as a
`VIM-Plugin <https://github.com/davidhalter/jedi-vim>`_. Autocompletion in your
REPL is also possible, IPython uses it natively and for the CPython REPL you
can install it. Jedi is well tested and bugs should be rare.
Here's a simple example of the autocompletion feature:
>>> import jedi
>>> source = '''
... import json
... json.lo'''
>>> script = jedi.Script(source, path='example.py')
>>> script
<Script: 'example.py' ...>
>>> completions = script.complete(3, len('json.lo'))
>>> completions
[<Completion: load>, <Completion: loads>]
>>> print(completions[0].complete)
ad
>>> print(completions[0].name)
load
"""
__version__ = '0.18.1'
from jedi.api import Script, Interpreter, set_debug_function, preload_module
from jedi import settings
from jedi.api.environment import find_virtualenvs, find_system_environments, \
get_default_environment, InvalidPythonEnvironment, create_environment, \
get_system_environment, InterpreterEnvironment
from jedi.api.project import Project, get_default_project
from jedi.api.exceptions import InternalError, RefactoringError
# Finally load the internal plugins. This is only internal.
from jedi.plugins import registry
del registry

View File

@ -0,0 +1,72 @@
import sys
from os.path import join, dirname, abspath, isdir
def _start_linter():
"""
This is a pre-alpha API. You're not supposed to use it at all, except for
testing. It will very likely change.
"""
import jedi
if '--debug' in sys.argv:
jedi.set_debug_function()
for path in sys.argv[2:]:
if path.startswith('--'):
continue
if isdir(path):
import fnmatch
import os
paths = []
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, '*.py'):
paths.append(os.path.join(root, filename))
else:
paths = [path]
try:
for p in paths:
for error in jedi.Script(path=p)._analysis():
print(error)
except Exception:
if '--pdb' in sys.argv:
import traceback
traceback.print_exc()
import pdb
pdb.post_mortem()
else:
raise
def _complete():
import jedi
import pdb
if '-d' in sys.argv:
sys.argv.remove('-d')
jedi.set_debug_function()
try:
completions = jedi.Script(sys.argv[2]).complete()
for c in completions:
c.docstring()
c.type
except Exception as e:
print(repr(e))
pdb.post_mortem()
else:
print(completions)
if len(sys.argv) == 2 and sys.argv[1] == 'repl':
# don't want to use __main__ only for repl yet, maybe we want to use it for
# something else. So just use the keyword ``repl`` for now.
print(join(dirname(abspath(__file__)), 'api', 'replstartup.py'))
elif len(sys.argv) > 1 and sys.argv[1] == '_linter':
_start_linter()
elif len(sys.argv) > 1 and sys.argv[1] == '_complete':
_complete()
else:
print('Command not implemented: %s' % sys.argv[1])

View File

@ -0,0 +1,32 @@
"""
This module is here to ensure compatibility of Windows/Linux/MacOS and
different Python versions.
"""
import errno
import sys
import pickle
def pickle_load(file):
try:
return pickle.load(file)
# Python on Windows doesn't throw EOF errors for pipes. So reraise them with
# the correct type, which is caught upwards.
except OSError:
if sys.platform == 'win32':
raise EOFError()
raise
def pickle_dump(data, file, protocol):
try:
pickle.dump(data, file, protocol)
# On Python 3.3, flush sometimes throws an error even though the writing
# operation should be completed.
file.flush()
# Python on Windows doesn't throw EPIPE errors for pipes. So reraise them with
# the correct type and error number.
except OSError:
if sys.platform == 'win32':
raise IOError(errno.EPIPE, "Broken pipe")
raise

View File

@ -0,0 +1,777 @@
"""
The API basically only provides one class. You can create a :class:`Script` and
use its methods.
Additionally you can add a debug function with :func:`set_debug_function`.
Alternatively, if you don't need a custom function and are happy with printing
debug messages to stdout, simply call :func:`set_debug_function` without
arguments.
"""
import sys
from pathlib import Path
import parso
from parso.python import tree
from jedi.parser_utils import get_executable_nodes
from jedi import debug
from jedi import settings
from jedi import cache
from jedi.file_io import KnownContentFileIO
from jedi.api import classes
from jedi.api import interpreter
from jedi.api import helpers
from jedi.api.helpers import validate_line_column
from jedi.api.completion import Completion, search_in_module
from jedi.api.keywords import KeywordName
from jedi.api.environment import InterpreterEnvironment
from jedi.api.project import get_default_project, Project
from jedi.api.errors import parso_to_jedi_errors
from jedi.api import refactoring
from jedi.api.refactoring.extract import extract_function, extract_variable
from jedi.inference import InferenceState
from jedi.inference import imports
from jedi.inference.references import find_references
from jedi.inference.arguments import try_iter_content
from jedi.inference.helpers import infer_call_of_leaf
from jedi.inference.sys_path import transform_path_to_dotted
from jedi.inference.syntax_tree import tree_name_to_values
from jedi.inference.value import ModuleValue
from jedi.inference.base_value import ValueSet
from jedi.inference.value.iterable import unpack_tuple_to_dict
from jedi.inference.gradual.conversion import convert_names, convert_values
from jedi.inference.gradual.utils import load_proper_stub_module
from jedi.inference.utils import to_list
# Jedi uses lots and lots of recursion. By setting this a little bit higher, we
# can remove some "maximum recursion depth" errors.
sys.setrecursionlimit(3000)
class Script:
"""
A Script is the base for completions, goto or whatever you want to do with
Jedi. The counterpart of this class is :class:`Interpreter`, which works
with actual dictionaries and can work with a REPL. This class
should be used when a user edits code in an editor.
You can either use the ``code`` parameter or ``path`` to read a file.
Usually you're going to want to use both of them (in an editor).
The Script's ``sys.path`` is very customizable:
- If `project` is provided with a ``sys_path``, that is going to be used.
- If `environment` is provided, its ``sys.path`` will be used
(see :func:`Environment.get_sys_path <jedi.api.environment.Environment.get_sys_path>`);
- Otherwise ``sys.path`` will match that of the default environment of
Jedi, which typically matches the sys path that was used at the time
when Jedi was imported.
Most methods have a ``line`` and a ``column`` parameter. Lines in Jedi are
always 1-based and columns are always zero based. To avoid repetition they
are not always documented. You can omit both line and column. Jedi will
then just do whatever action you are calling at the end of the file. If you
provide only the line, Jedi will just complete at the end of that line.
.. warning:: By default :attr:`jedi.settings.fast_parser` is enabled, which means
that parso reuses modules (i.e. they are not immutable). With this setting
Jedi is **not thread safe** and it is also not safe to use multiple
:class:`.Script` instances and its definitions at the same time.
If you are a normal plugin developer this should not be an issue. It is
an issue for people that do more complex stuff with Jedi.
This is purely a performance optimization and works pretty well for all
typical usages, however consider to turn the setting off if it causes
you problems. See also
`this discussion <https://github.com/davidhalter/jedi/issues/1240>`_.
:param code: The source code of the current file, separated by newlines.
:type code: str
:param path: The path of the file in the file system, or ``''`` if
it hasn't been saved yet.
:type path: str or pathlib.Path or None
:param Environment environment: Provide a predefined :ref:`Environment <environments>`
to work with a specific Python version or virtualenv.
:param Project project: Provide a :class:`.Project` to make sure finding
references works well, because the right folder is searched. There are
also ways to modify the sys path and other things.
"""
def __init__(self, code=None, *, path=None, environment=None, project=None):
self._orig_path = path
if isinstance(path, str):
path = Path(path)
self.path = path.absolute() if path else None
if code is None:
if path is None:
raise ValueError("Must provide at least one of code or path")
# TODO add a better warning than the traceback!
with open(path, 'rb') as f:
code = f.read()
if project is None:
# Load the Python grammar of the current interpreter.
project = get_default_project(None if self.path is None else self.path.parent)
self._inference_state = InferenceState(
project, environment=environment, script_path=self.path
)
debug.speed('init')
self._module_node, code = self._inference_state.parse_and_get_code(
code=code,
path=self.path,
use_latest_grammar=path and path.suffix == '.pyi',
cache=False, # No disk cache, because the current script often changes.
diff_cache=settings.fast_parser,
cache_path=settings.cache_directory,
)
debug.speed('parsed')
self._code_lines = parso.split_lines(code, keepends=True)
self._code = code
cache.clear_time_caches()
debug.reset_time()
# Cache the module, this is mostly useful for testing, since this shouldn't
# be called multiple times.
@cache.memoize_method
def _get_module(self):
names = None
is_package = False
if self.path is not None:
import_names, is_p = transform_path_to_dotted(
self._inference_state.get_sys_path(add_parent_paths=False),
self.path
)
if import_names is not None:
names = import_names
is_package = is_p
if self.path is None:
file_io = None
else:
file_io = KnownContentFileIO(self.path, self._code)
if self.path is not None and self.path.suffix == '.pyi':
# We are in a stub file. Try to load the stub properly.
stub_module = load_proper_stub_module(
self._inference_state,
self._inference_state.latest_grammar,
file_io,
names,
self._module_node
)
if stub_module is not None:
return stub_module
if names is None:
names = ('__main__',)
module = ModuleValue(
self._inference_state, self._module_node,
file_io=file_io,
string_names=names,
code_lines=self._code_lines,
is_package=is_package,
)
if names[0] not in ('builtins', 'typing'):
# These modules are essential for Jedi, so don't overwrite them.
self._inference_state.module_cache.add(names, ValueSet([module]))
return module
def _get_module_context(self):
return self._get_module().as_context()
def __repr__(self):
return '<%s: %s %r>' % (
self.__class__.__name__,
repr(self._orig_path),
self._inference_state.environment,
)
@validate_line_column
def complete(self, line=None, column=None, *, fuzzy=False):
"""
Completes objects under the cursor.
Those objects contain information about the completions, more than just
names.
:param fuzzy: Default False. Will return fuzzy completions, which means
that e.g. ``ooa`` will match ``foobar``.
:return: Completion objects, sorted by name. Normal names appear
before "private" names that start with ``_`` and those appear
before magic methods and name mangled names that start with ``__``.
:rtype: list of :class:`.Completion`
"""
with debug.increase_indent_cm('complete'):
completion = Completion(
self._inference_state, self._get_module_context(), self._code_lines,
(line, column), self.get_signatures, fuzzy=fuzzy,
)
return completion.complete()
@validate_line_column
def infer(self, line=None, column=None, *, only_stubs=False, prefer_stubs=False):
"""
Return the definitions of the object under the cursor. It is basically a wrapper
around Jedi's type inference.
This method follows complicated paths and returns the end, not the
first definition. The big difference between :meth:`goto` and
:meth:`infer` is that :meth:`goto` doesn't
follow imports and statements. Multiple objects may be returned,
because, depending on an option, there can be two different versions of a
function.
:param only_stubs: Only return stubs for this method.
:param prefer_stubs: Prefer stubs to Python objects for this method.
:rtype: list of :class:`.Name`
"""
pos = line, column
leaf = self._module_node.get_name_of_position(pos)
if leaf is None:
leaf = self._module_node.get_leaf_for_position(pos)
if leaf is None or leaf.type == 'string':
return []
if leaf.end_pos == (line, column) and leaf.type == 'operator':
next_ = leaf.get_next_leaf()
if next_.start_pos == leaf.end_pos \
and next_.type in ('number', 'string', 'keyword'):
leaf = next_
context = self._get_module_context().create_context(leaf)
values = helpers.infer(self._inference_state, context, leaf)
values = convert_values(
values,
only_stubs=only_stubs,
prefer_stubs=prefer_stubs,
)
defs = [classes.Name(self._inference_state, c.name) for c in values]
# The additional set here allows the definitions to become unique in an
# API sense. In the internals we want to separate more things than in
# the API.
return helpers.sorted_definitions(set(defs))
@validate_line_column
def goto(self, line=None, column=None, *, follow_imports=False, follow_builtin_imports=False,
only_stubs=False, prefer_stubs=False):
"""
Goes to the name that defined the object under the cursor. Optionally
you can follow imports.
Multiple objects may be returned, depending on whether there can be two
different versions of a function.
:param follow_imports: The method will follow imports.
:param follow_builtin_imports: If ``follow_imports`` is True will try
to look up names in builtins (i.e. compiled or extension modules).
:param only_stubs: Only return stubs for this method.
:param prefer_stubs: Prefer stubs to Python objects for this method.
:rtype: list of :class:`.Name`
"""
tree_name = self._module_node.get_name_of_position((line, column))
if tree_name is None:
# Without a name we really just want to jump to the result e.g.
# executed by `foo()`, if the cursor is after `)`.
return self.infer(line, column, only_stubs=only_stubs, prefer_stubs=prefer_stubs)
name = self._get_module_context().create_name(tree_name)
# Make it possible to goto the super class function/attribute
# definitions, when they are overwritten.
names = []
if name.tree_name.is_definition() and name.parent_context.is_class():
class_node = name.parent_context.tree_node
class_value = self._get_module_context().create_value(class_node)
mro = class_value.py__mro__()
next(mro) # Ignore the first entry, because it's the class itself.
for cls in mro:
names = cls.goto(tree_name.value)
if names:
break
if not names:
names = list(name.goto())
if follow_imports:
names = helpers.filter_follow_imports(names, follow_builtin_imports)
names = convert_names(
names,
only_stubs=only_stubs,
prefer_stubs=prefer_stubs,
)
defs = [classes.Name(self._inference_state, d) for d in set(names)]
# Avoid duplicates
return list(set(helpers.sorted_definitions(defs)))
def search(self, string, *, all_scopes=False):
"""
Searches a name in the current file. For a description of how the
search string should look, please have a look at
:meth:`.Project.search`.
:param bool all_scopes: Default False; searches not only for
definitions at the top level of a module, but also in
functions and classes.
:yields: :class:`.Name`
"""
return self._search_func(string, all_scopes=all_scopes)
@to_list
def _search_func(self, string, all_scopes=False, complete=False, fuzzy=False):
names = self._names(all_scopes=all_scopes)
wanted_type, wanted_names = helpers.split_search_string(string)
return search_in_module(
self._inference_state,
self._get_module_context(),
names=names,
wanted_type=wanted_type,
wanted_names=wanted_names,
complete=complete,
fuzzy=fuzzy,
)
def complete_search(self, string, **kwargs):
"""
Like :meth:`.Script.search`, but completes that string. If you want to
have all possible definitions in a file you can also provide an empty
string.
:param bool all_scopes: Default False; searches not only for
definitions at the top level of a module, but also in
functions and classes.
:param fuzzy: Default False. Will return fuzzy completions, which means
that e.g. ``ooa`` will match ``foobar``.
:yields: :class:`.Completion`
"""
return self._search_func(string, complete=True, **kwargs)
@validate_line_column
def help(self, line=None, column=None):
"""
Used to display a help window to users. Uses :meth:`.Script.goto` and
returns additional definitions for keywords and operators.
Typically you will want to display :meth:`.BaseName.docstring` to the
user for all the returned definitions.
The additional definitions are ``Name(...).type == 'keyword'``.
These definitions do not have a lot of value apart from their docstring
attribute, which contains the output of Python's :func:`help` function.
:rtype: list of :class:`.Name`
"""
definitions = self.goto(line, column, follow_imports=True)
if definitions:
return definitions
leaf = self._module_node.get_leaf_for_position((line, column))
if leaf is not None and leaf.type in ('keyword', 'operator', 'error_leaf'):
def need_pydoc():
if leaf.value in ('(', ')', '[', ']'):
if leaf.parent.type == 'trailer':
return False
if leaf.parent.type == 'atom':
return False
grammar = self._inference_state.grammar
# This parso stuff is not public, but since I control it, this
# is fine :-) ~dave
reserved = grammar._pgen_grammar.reserved_syntax_strings.keys()
return leaf.value in reserved
if need_pydoc():
name = KeywordName(self._inference_state, leaf.value)
return [classes.Name(self._inference_state, name)]
return []
@validate_line_column
def get_references(self, line=None, column=None, **kwargs):
"""
Lists all references of a variable in a project. Since this can be
quite hard to do for Jedi, if it is too complicated, Jedi will stop
searching.
:param include_builtins: Default ``True``. If ``False``, checks if a definition
is a builtin (e.g. ``sys``) and in that case does not return it.
:param scope: Default ``'project'``. If ``'file'``, include references in
the current module only.
:rtype: list of :class:`.Name`
"""
def _references(include_builtins=True, scope='project'):
if scope not in ('project', 'file'):
raise ValueError('Only the scopes "file" and "project" are allowed')
tree_name = self._module_node.get_name_of_position((line, column))
if tree_name is None:
# Must be syntax
return []
names = find_references(self._get_module_context(), tree_name, scope == 'file')
definitions = [classes.Name(self._inference_state, n) for n in names]
if not include_builtins or scope == 'file':
definitions = [d for d in definitions if not d.in_builtin_module()]
return helpers.sorted_definitions(definitions)
return _references(**kwargs)
@validate_line_column
def get_signatures(self, line=None, column=None):
"""
Return the function object of the call under the cursor.
E.g. if the cursor is here::
abs(# <-- cursor is here
This would return the ``abs`` function. On the other hand::
abs()# <-- cursor is here
This would return an empty list.
:rtype: list of :class:`.Signature`
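The ``abs`` example above as an API call (sketch; the exact signature
text depends on the environment)::
import jedi
for signature in jedi.Script('abs(').get_signatures(1, 4):
    print(signature.to_string(), signature.index)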
"""
pos = line, column
call_details = helpers.get_signature_details(self._module_node, pos)
if call_details is None:
return []
context = self._get_module_context().create_context(call_details.bracket_leaf)
definitions = helpers.cache_signatures(
self._inference_state,
context,
call_details.bracket_leaf,
self._code_lines,
pos
)
debug.speed('func_call followed')
# TODO here we use stubs instead of the actual values. We should use
# the signatures from stubs, but the actual values, probably?!
return [classes.Signature(self._inference_state, signature, call_details)
for signature in definitions.get_signatures()]
@validate_line_column
def get_context(self, line=None, column=None):
"""
Returns the scope context under the cursor. This basically means the
function, class or module where the cursor is at.
:rtype: :class:`.Name`
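A short sketch (the class and method names are invented)::
import jedi
source = 'class Foo:\n    def bar(self):\n        pass'
context = jedi.Script(source).get_context(3, 8)
print(context.full_name)  # typically something like __main__.Foo.bar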
"""
pos = (line, column)
leaf = self._module_node.get_leaf_for_position(pos, include_prefixes=True)
if leaf.start_pos > pos or leaf.type == 'endmarker':
previous_leaf = leaf.get_previous_leaf()
if previous_leaf is not None:
leaf = previous_leaf
module_context = self._get_module_context()
n = tree.search_ancestor(leaf, 'funcdef', 'classdef')
if n is not None and n.start_pos < pos <= n.children[-1].start_pos:
# This is a bit of a special case. The context of a function/class
# name/param/keyword is always its parent context, not the
# function itself. Catch all the cases here where we are before the
# suite object, but still in the function.
context = module_context.create_value(n).as_context()
else:
context = module_context.create_context(leaf)
while context.name is None:
context = context.parent_context # comprehensions
definition = classes.Name(self._inference_state, context.name)
while definition.type != 'module':
name = definition._name # TODO private access
tree_name = name.tree_name
if tree_name is not None: # Happens with lambdas.
scope = tree_name.get_definition()
if scope.start_pos[1] < column:
break
definition = definition.parent()
return definition
def _analysis(self):
self._inference_state.is_analysis = True
self._inference_state.analysis_modules = [self._module_node]
module = self._get_module_context()
try:
for node in get_executable_nodes(self._module_node):
context = module.create_context(node)
if node.type in ('funcdef', 'classdef'):
# Resolve the decorators.
tree_name_to_values(self._inference_state, context, node.children[1])
elif isinstance(node, tree.Import):
import_names = set(node.get_defined_names())
if node.is_nested():
import_names |= set(path[-1] for path in node.get_paths())
for n in import_names:
imports.infer_import(context, n)
elif node.type == 'expr_stmt':
types = context.infer_node(node)
for testlist in node.children[:-1:2]:
# Iterate tuples.
unpack_tuple_to_dict(context, types, testlist)
else:
if node.type == 'name':
defs = self._inference_state.infer(context, node)
else:
defs = infer_call_of_leaf(context, node)
try_iter_content(defs)
self._inference_state.reset_recursion_limitations()
ana = [a for a in self._inference_state.analysis if self.path == a.path]
return sorted(set(ana), key=lambda x: x.line)
finally:
self._inference_state.is_analysis = False
def get_names(self, **kwargs):
"""
Returns names defined in the current file.
:param all_scopes: If True lists the names of all scopes instead of
only the module namespace.
:param definitions: If True lists the names that have been defined by a
class, function or a statement (``a = b`` returns ``a``).
:param references: If True lists all the names that are not listed by
``definitions=True``. E.g. ``a = b`` returns ``b``.
:rtype: list of :class:`.Name`
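A minimal sketch (the module source is invented)::
import jedi
source = 'import os\nclass A:\n    pass\nvariable = 1'
for name in jedi.Script(source).get_names(all_scopes=True):
    print(name.name, name.type)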
"""
names = self._names(**kwargs)
return [classes.Name(self._inference_state, n) for n in names]
def get_syntax_errors(self):
"""
Lists all syntax errors in the current file.
:rtype: list of :class:`.SyntaxError`
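Sketch (the broken source is invented)::
import jedi
for error in jedi.Script('def foo(:\n    pass').get_syntax_errors():
    print(error.line, error.column)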
"""
return parso_to_jedi_errors(self._inference_state.grammar, self._module_node)
def _names(self, all_scopes=False, definitions=True, references=False):
# Set line/column to a random position, because they don't matter.
module_context = self._get_module_context()
defs = [
module_context.create_name(name)
for name in helpers.get_module_names(
self._module_node,
all_scopes=all_scopes,
definitions=definitions,
references=references,
)
]
return sorted(defs, key=lambda x: x.start_pos)
def rename(self, line=None, column=None, *, new_name):
"""
Renames all references of the variable under the cursor.
:param new_name: The variable under the cursor will be renamed to this
string.
:raises: :exc:`.RefactoringError`
:rtype: :class:`.Refactoring`
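A possible call (sketch; the code, path and new name are invented, and
:meth:`.Refactoring.get_diff` is just one way to inspect the result)::
import jedi
script = jedi.Script('x = 1\nprint(x)', path='example.py')
refactoring = script.rename(1, 0, new_name='y')
print(refactoring.get_diff())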
"""
definitions = self.get_references(line, column, include_builtins=False)
return refactoring.rename(self._inference_state, definitions, new_name)
@validate_line_column
def extract_variable(self, line, column, *, new_name, until_line=None, until_column=None):
"""
Moves an expression to a new statement.
For example if you have the cursor on ``foo`` and provide a
``new_name`` called ``bar``::
foo = 3.1
x = int(foo + 1)
the code above will become::
foo = 3.1
bar = foo + 1
x = int(bar)
:param new_name: The expression under the cursor will be renamed to
this string.
:param int until_line: The selection range ends at this line; when
omitted, Jedi will be clever and try to define the range itself.
:param int until_column: The selection range ends at this column; when
omitted, Jedi will be clever and try to define the range itself.
:raises: :exc:`.RefactoringError`
:rtype: :class:`.Refactoring`
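One way the call for the example above might look (sketch; path and new
name are invented)::
import jedi
source = 'foo = 3.1\nx = int(foo + 1)'
script = jedi.Script(source, path='example.py')
refactoring = script.extract_variable(2, 8, new_name='bar')
print(refactoring.get_diff())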
"""
if until_line is None and until_column is None:
until_pos = None
else:
if until_line is None:
until_line = line
if until_column is None:
until_column = len(self._code_lines[until_line - 1])
until_pos = until_line, until_column
return extract_variable(
self._inference_state, self.path, self._module_node,
new_name, (line, column), until_pos
)
@validate_line_column
def extract_function(self, line, column, *, new_name, until_line=None, until_column=None):
"""
Moves an expression to a new function.
For example if you have the cursor on ``foo`` and provide a
``new_name`` called ``bar``::
global_var = 3
def x():
foo = 3.1
x = int(foo + 1 + global_var)
the code above will become::
global_var = 3
def bar(foo):
return int(foo + 1 + global_var)
def x():
foo = 3.1
x = bar(foo)
:param new_name: The expression under the cursor will be replaced with
a function with this name.
:param int until_line: The selection range ends at this line; when
omitted, Jedi will be clever and try to define the range itself.
:param int until_column: The selection range ends at this column; when
omitted, Jedi will be clever and try to define the range itself.
:raises: :exc:`.RefactoringError`
:rtype: :class:`.Refactoring`
"""
if until_line is None and until_column is None:
until_pos = None
else:
if until_line is None:
until_line = line
if until_column is None:
until_column = len(self._code_lines[until_line - 1])
until_pos = until_line, until_column
return extract_function(
self._inference_state, self.path, self._get_module_context(),
new_name, (line, column), until_pos
)
def inline(self, line=None, column=None):
"""
Inlines a variable under the cursor. This is basically the opposite of
extracting a variable. For example with the cursor on bar::
foo = 3.1
bar = foo + 1
x = int(bar)
the code above will become::
foo = 3.1
x = int(foo + 1)
:raises: :exc:`.RefactoringError`
:rtype: :class:`.Refactoring`
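A matching API call for the example above (sketch; the path is invented)::
import jedi
source = 'foo = 3.1\nbar = foo + 1\nx = int(bar)'
script = jedi.Script(source, path='example.py')
print(script.inline(2, 0).get_diff())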
"""
names = [d._name for d in self.get_references(line, column, include_builtins=True)]
return refactoring.inline(self._inference_state, names)
class Interpreter(Script):
"""
Jedi's API for Python REPLs.
Implements all of the methods that are present in :class:`.Script` as well.
In addition to completions that normal REPL completion does like
``str.upper``, Jedi also supports code completion based on static code
analysis. For example Jedi will complete ``str().upper``.
>>> from os.path import join
>>> namespace = locals()
>>> script = Interpreter('join("").up', [namespace])
>>> print(script.complete()[0].name)
upper
All keyword arguments are the same as the arguments for :class:`.Script`.
:param str code: Code to parse.
:type namespaces: typing.List[dict]
:param namespaces: A list of namespace dictionaries such as the ones
returned by :func:`globals` and :func:`locals`.
"""
_allow_descriptor_getattr_default = True
def __init__(self, code, namespaces, *, project=None, **kwds):
try:
namespaces = [dict(n) for n in namespaces]
except Exception:
raise TypeError("namespaces must be a non-empty list of dicts.")
environment = kwds.get('environment', None)
if environment is None:
environment = InterpreterEnvironment()
else:
if not isinstance(environment, InterpreterEnvironment):
raise TypeError("The environment needs to be an InterpreterEnvironment subclass.")
if project is None:
project = Project(Path.cwd())
super().__init__(code, environment=environment, project=project, **kwds)
self.namespaces = namespaces
self._inference_state.allow_descriptor_getattr = self._allow_descriptor_getattr_default
@cache.memoize_method
def _get_module_context(self):
if self.path is None:
file_io = None
else:
file_io = KnownContentFileIO(self.path, self._code)
tree_module_value = ModuleValue(
self._inference_state, self._module_node,
file_io=file_io,
string_names=('__main__',),
code_lines=self._code_lines,
)
return interpreter.MixedModuleContext(
tree_module_value,
self.namespaces,
)
def preload_module(*modules):
"""
Preloading modules tells Jedi to load a module now, instead of parsing it
lazily when it is first needed. This can be useful for IDEs, to control
which modules to load on startup.
:param modules: one or more module names, as strings.
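For example (the module names are just common examples)::
import jedi
jedi.preload_module('os', 'json')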
"""
for m in modules:
s = "import %s as x; x." % m
Script(s).complete(1, len(s))
def set_debug_function(func_cb=debug.print_to_stdout, warnings=True,
notices=True, speed=True):
"""
Define a callback debug function to get all the debug messages.
If you don't specify any arguments, debug messages will be printed to stdout.
:param func_cb: The callback function for debug messages.
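A minimal sketch with a custom callback; the two-argument signature
mirrors the default ``debug.print_to_stdout`` (a color name and the
message) and should be treated as an assumption::
import jedi
def log_debug(color, message):
    print(message)
jedi.set_debug_function(log_debug, speed=False)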
"""
debug.debug_function = func_cb
debug.enable_warning = warnings
debug.enable_notice = notices
debug.enable_speed = speed

View File

@ -0,0 +1,896 @@
"""
There are a couple of classes documented in here:
- :class:`.BaseName` as an abstract base class for almost everything.
- :class:`.Name` used in a lot of places
- :class:`.Completion` for completions
- :class:`.BaseSignature` as a base class for signatures
- :class:`.Signature` for :meth:`.Script.get_signatures` only
- :class:`.ParamName` used for parameters of signatures
- :class:`.Refactoring` for refactorings
- :class:`.SyntaxError` for :meth:`.Script.get_syntax_errors` only
These classes are by far the biggest part of the API, because they contain
the interesting information about all operations.
"""
import re
from pathlib import Path
from typing import Optional
from parso.tree import search_ancestor
from jedi import settings
from jedi import debug
from jedi.inference.utils import unite
from jedi.cache import memoize_method
from jedi.inference.compiled.mixed import MixedName
from jedi.inference.names import ImportName, SubModuleName
from jedi.inference.gradual.stub_value import StubModuleValue
from jedi.inference.gradual.conversion import convert_names, convert_values
from jedi.inference.base_value import ValueSet, HasNoContext
from jedi.api.keywords import KeywordName
from jedi.api import completion_cache
from jedi.api.helpers import filter_follow_imports
def _sort_names_by_start_pos(names):
return sorted(names, key=lambda s: s.start_pos or (0, 0))
def defined_names(inference_state, value):
"""
List sub-definitions (e.g., methods in a class).
:type value: Value
:rtype: list of Name
"""
try:
context = value.as_context()
except HasNoContext:
return []
filter = next(context.get_filters())
names = [name for name in filter.values()]
return [Name(inference_state, n) for n in _sort_names_by_start_pos(names)]
def _values_to_definitions(values):
return [Name(c.inference_state, c.name) for c in values]
class BaseName:
"""
The base class for all definitions, completions and signatures.
"""
_mapping = {
'posixpath': 'os.path',
'riscospath': 'os.path',
'ntpath': 'os.path',
'os2emxpath': 'os.path',
'macpath': 'os.path',
'genericpath': 'os.path',
'posix': 'os',
'_io': 'io',
'_functools': 'functools',
'_collections': 'collections',
'_socket': 'socket',
'_sqlite3': 'sqlite3',
}
_tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in {
'argparse._ActionsContainer': 'argparse.ArgumentParser',
}.items())
def __init__(self, inference_state, name):
self._inference_state = inference_state
self._name = name
"""
An instance of :class:`parso.python.tree.Name` subclass.
"""
self.is_keyword = isinstance(self._name, KeywordName)
@memoize_method
def _get_module_context(self):
# This can take a while to complete, because in the worst case of
# imports (consider `import a` completions), we need to load all
# modules starting with a first.
return self._name.get_root_context()
@property
def module_path(self) -> Optional[Path]:
"""
Shows the file path of a module, e.g. ``/usr/lib/python3.9/os.py``.
"""
module = self._get_module_context()
if module.is_stub() or not module.is_compiled():
# Compiled modules should not return a module path even if they
# have one.
path: Optional[Path] = self._get_module_context().py__file__()
if path is not None:
return path
return None
@property
def name(self):
"""
Name of variable/function/class/module.
For example, for ``x = None`` it returns ``'x'``.
:rtype: str or None
"""
return self._name.get_public_name()
@property
def type(self):
"""
The type of the definition.
Here is an example of the value of this attribute. Let's consider
the following source. As what is in ``variable`` is unambiguous
to Jedi, :meth:`jedi.Script.infer` should return a list of
definitions for ``keyword``, ``f``, ``C`` and ``x``.
>>> from jedi import Script
>>> source = '''
... import keyword
...
... class C:
... pass
...
... class D:
... pass
...
... x = D()
...
... def f():
... pass
...
... for variable in [keyword, f, C, x]:
... variable'''
>>> script = Script(source)
>>> defs = script.infer()
Before showing what is in ``defs``, let's sort it by :attr:`line`
so that it is easy to relate the result to the source code.
>>> defs = sorted(defs, key=lambda d: d.line)
>>> print(defs) # doctest: +NORMALIZE_WHITESPACE
[<Name full_name='keyword', description='module keyword'>,
<Name full_name='__main__.C', description='class C'>,
<Name full_name='__main__.D', description='instance D'>,
<Name full_name='__main__.f', description='def f'>]
Finally, here is what you can get from :attr:`type`:
>>> defs = [d.type for d in defs]
>>> defs[0]
'module'
>>> defs[1]
'class'
>>> defs[2]
'instance'
>>> defs[3]
'function'
Valid values for type are ``module``, ``class``, ``instance``, ``function``,
``param``, ``path``, ``keyword``, ``property`` and ``statement``.
"""
tree_name = self._name.tree_name
resolve = False
if tree_name is not None:
# TODO move this to their respective names.
definition = tree_name.get_definition()
if definition is not None and definition.type == 'import_from' and \
tree_name.is_definition():
resolve = True
if isinstance(self._name, SubModuleName) or resolve:
for value in self._name.infer():
return value.api_type
return self._name.api_type
@property
def module_name(self):
"""
The module name, a bit similar to what ``__name__`` is in a random
Python module.
>>> from jedi import Script
>>> source = 'import json'
>>> script = Script(source, path='example.py')
>>> d = script.infer()[0]
>>> print(d.module_name) # doctest: +ELLIPSIS
json
"""
return self._get_module_context().py__name__()
def in_builtin_module(self):
"""
Returns True, if this is a builtin module.
"""
value = self._get_module_context().get_value()
if isinstance(value, StubModuleValue):
return any(v.is_compiled() for v in value.non_stub_value_set)
return value.is_compiled()
@property
def line(self):
"""The line where the definition occurs (starting with 1)."""
start_pos = self._name.start_pos
if start_pos is None:
return None
return start_pos[0]
@property
def column(self):
"""The column where the definition occurs (starting with 0)."""
start_pos = self._name.start_pos
if start_pos is None:
return None
return start_pos[1]
def get_definition_start_position(self):
"""
The (row, column) of the start of the definition range. Rows start with
1, columns start with 0.
:rtype: Optional[Tuple[int, int]]
"""
if self._name.tree_name is None:
return None
definition = self._name.tree_name.get_definition()
if definition is None:
return self._name.start_pos
return definition.start_pos
def get_definition_end_position(self):
"""
The (row, column) of the end of the definition range. Rows start with
1, columns start with 0.
:rtype: Optional[Tuple[int, int]]
"""
if self._name.tree_name is None:
return None
definition = self._name.tree_name.get_definition()
if definition is None:
return self._name.tree_name.end_pos
if self.type in ("function", "class"):
last_leaf = definition.get_last_leaf()
if last_leaf.type == "newline":
return last_leaf.get_previous_leaf().end_pos
return last_leaf.end_pos
return definition.end_pos
def docstring(self, raw=False, fast=True):
r"""
Return a document string for this completion object.
Example:
>>> from jedi import Script
>>> source = '''\
... def f(a, b=1):
... "Document for function f."
... '''
>>> script = Script(source, path='example.py')
>>> doc = script.infer(1, len('def f'))[0].docstring()
>>> print(doc)
f(a, b=1)
<BLANKLINE>
Document for function f.
Notice that useful extra information is added to the actual
docstring, e.g. function signatures are prepended to their docstrings.
If you need the actual docstring, use ``raw=True`` instead.
>>> print(script.infer(1, len('def f'))[0].docstring(raw=True))
Document for function f.
:param fast: Don't follow imports that are only one level deep like
``import foo``, but follow ``from foo import bar``. This makes
sense for speed reasons. Completing ``import a`` is slow if you use
``foo.docstring(fast=False)`` on every object, because it
parses all libraries starting with ``a``.
"""
if isinstance(self._name, ImportName) and fast:
return ''
doc = self._get_docstring()
if raw:
return doc
signature_text = self._get_docstring_signature()
if signature_text and doc:
return signature_text + '\n\n' + doc
else:
return signature_text + doc
def _get_docstring(self):
return self._name.py__doc__()
def _get_docstring_signature(self):
return '\n'.join(
signature.to_string()
for signature in self._get_signatures(for_docstring=True)
)
@property
def description(self):
"""
A description of the :class:`.Name` object, which is heavily used
in testing, e.g. for ``isinstance`` it returns ``def isinstance``.
Example:
>>> from jedi import Script
>>> source = '''
... def f():
... pass
...
... class C:
... pass
...
... variable = f if random.choice([0,1]) else C'''
>>> script = Script(source) # line is maximum by default
>>> defs = script.infer(column=3)
>>> defs = sorted(defs, key=lambda d: d.line)
>>> print(defs) # doctest: +NORMALIZE_WHITESPACE
[<Name full_name='__main__.f', description='def f'>,
<Name full_name='__main__.C', description='class C'>]
>>> str(defs[0].description)
'def f'
>>> str(defs[1].description)
'class C'
"""
typ = self.type
tree_name = self._name.tree_name
if typ == 'param':
return typ + ' ' + self._name.to_string()
if typ in ('function', 'class', 'module', 'instance') or tree_name is None:
if typ == 'function':
# For the description we want a short and a pythonic way.
typ = 'def'
return typ + ' ' + self._name.get_public_name()
definition = tree_name.get_definition(include_setitem=True) or tree_name
# Remove the prefix, because that's not what we want for get_code
# here.
txt = definition.get_code(include_prefix=False)
# Delete comments:
txt = re.sub(r'#[^\n]+\n', ' ', txt)
# Delete multi spaces/newlines
txt = re.sub(r'\s+', ' ', txt).strip()
return txt
@property
def full_name(self):
"""
Dot-separated path of this object.
It is in the form of ``<module>[.<submodule>[...]][.<object>]``.
It is useful when you want to look up the Python manual for the
object at hand.
Example:
>>> from jedi import Script
>>> source = '''
... import os
... os.path.join'''
>>> script = Script(source, path='example.py')
>>> print(script.infer(3, len('os.path.join'))[0].full_name)
os.path.join
Notice that it returns ``'os.path.join'`` instead of (for example)
``'posixpath.join'``. This is not correct, since the module's name would
be ``<module 'posixpath' ...>``. However, most users find the former
more practical.
"""
if not self._name.is_value_name:
return None
names = self._name.get_qualified_names(include_module_names=True)
if names is None:
return None
names = list(names)
try:
names[0] = self._mapping[names[0]]
except KeyError:
pass
return '.'.join(names)
def is_stub(self):
"""
Returns True if the current name is defined in a stub file.
"""
if not self._name.is_value_name:
return False
return self._name.get_root_context().is_stub()
def is_side_effect(self):
"""
Checks if a name is defined as ``self.foo = 3``. In the case of ``self``,
this function would return False; for ``foo`` it would return True.
"""
tree_name = self._name.tree_name
if tree_name is None:
return False
return tree_name.is_definition() and tree_name.parent.type == 'trailer'
@debug.increase_indent_cm('goto on name')
def goto(self, *, follow_imports=False, follow_builtin_imports=False,
only_stubs=False, prefer_stubs=False):
"""
Like :meth:`.Script.goto` (also supports the same params), but does it
for the current name. This is typically useful if you are using
something like :meth:`.Script.get_names()`.
:param follow_imports: The goto call will follow imports.
:param follow_builtin_imports: If follow_imports is True will try to
look up names in builtins (i.e. compiled or extension modules).
:param only_stubs: Only return stubs for this goto call.
:param prefer_stubs: Prefer stubs to Python objects for this goto call.
:rtype: list of :class:`Name`
"""
if not self._name.is_value_name:
return []
names = self._name.goto()
if follow_imports:
names = filter_follow_imports(names, follow_builtin_imports)
names = convert_names(
names,
only_stubs=only_stubs,
prefer_stubs=prefer_stubs,
)
return [self if n == self._name else Name(self._inference_state, n)
for n in names]
@debug.increase_indent_cm('infer on name')
def infer(self, *, only_stubs=False, prefer_stubs=False):
"""
Like :meth:`.Script.infer`, it can be useful to understand which type
the current name has.
Return the actual definitions. I strongly recommend not using it for
your completions, because it might slow down |jedi|. If you want to
read only a few objects (<=20), it might be useful, especially to get
the original docstrings. The basic problem of this function is that it
follows all results. This means with 1000 completions (e.g. numpy),
it's just very, very slow.
:param only_stubs: Only return stubs for this goto call.
:param prefer_stubs: Prefer stubs to Python objects for this type
inference call.
:rtype: list of :class:`Name`
"""
assert not (only_stubs and prefer_stubs)
if not self._name.is_value_name:
return []
# First we need to make sure that we have stub names (if possible) that
# we can follow. If we don't do that, we can end up with the inferred
# results of Python objects instead of stubs.
names = convert_names([self._name], prefer_stubs=True)
values = convert_values(
ValueSet.from_sets(n.infer() for n in names),
only_stubs=only_stubs,
prefer_stubs=prefer_stubs,
)
resulting_names = [c.name for c in values]
return [self if n == self._name else Name(self._inference_state, n)
for n in resulting_names]
def parent(self):
"""
Returns the parent scope of this identifier.
:rtype: Name
"""
if not self._name.is_value_name:
return None
if self.type in ('function', 'class', 'param') and self._name.tree_name is not None:
# Since the parent_context doesn't really match what the user
# thinks of as the parent here, we handle these cases separately.
# The reason for this is the following:
# - class: Nested classes parent_context is always the
# parent_context of the most outer one.
# - function: Functions in classes have the module as
# parent_context.
# - param: The parent_context of a param is not its function but
# e.g. the outer class or module.
cls_or_func_node = self._name.tree_name.get_definition()
parent = search_ancestor(cls_or_func_node, 'funcdef', 'classdef', 'file_input')
context = self._get_module_context().create_value(parent).as_context()
else:
context = self._name.parent_context
if context is None:
return None
while context.name is None:
# Happens for comprehension contexts
context = context.parent_context
return Name(self._inference_state, context.name)
def __repr__(self):
return "<%s %sname=%r, description=%r>" % (
self.__class__.__name__,
'full_' if self.full_name else '',
self.full_name or self.name,
self.description,
)
def get_line_code(self, before=0, after=0):
"""
Returns the line of code where this object was defined.
:param before: Add n lines before the current line to the output.
:param after: Add n lines after the current line to the output.
:return str: Returns the line(s) of code or an empty string if it's a
builtin.
"""
if not self._name.is_value_name:
return ''
lines = self._name.get_root_context().code_lines
if lines is None:
# Probably a builtin module, just ignore in that case.
return ''
index = self._name.start_pos[0] - 1
start_index = max(index - before, 0)
return ''.join(lines[start_index:index + after + 1])
def _get_signatures(self, for_docstring=False):
if self._name.api_type == 'property':
return []
if for_docstring and self._name.api_type == 'statement' and not self.is_stub():
# For docstrings we don't resolve signatures if they are simple
# statements and not stubs. This is a speed optimization.
return []
if isinstance(self._name, MixedName):
# While this would eventually happen anyway, it's basically just a
# shortcut to not infer anything tree related, because it's really
# not necessary.
return self._name.infer_compiled_value().get_signatures()
names = convert_names([self._name], prefer_stubs=True)
return [sig for name in names for sig in name.infer().get_signatures()]
def get_signatures(self):
"""
Returns all potential signatures for a function or a class. Multiple
signatures are typical if you use Python stubs with ``@overload``.
:rtype: list of :class:`BaseSignature`
"""
return [
BaseSignature(self._inference_state, s)
for s in self._get_signatures()
]
def execute(self):
"""
Uses type inference to "execute" this identifier and returns the
executed objects.
:rtype: list of :class:`Name`
"""
return _values_to_definitions(self._name.infer().execute_with_values())
def get_type_hint(self):
"""
Returns type hints like ``Iterable[int]`` or ``Union[int, str]``.
This method might be quite slow, especially for functions. The problem
is finding executions for those functions to return something like
``Callable[[int, str], str]``.
:rtype: str
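Sketch (the snippet is invented; the exact hint text depends on what
Jedi can infer)::
import jedi
for definition in jedi.Script('x = [1, 2, 3]\nx').infer(2, 0):
    print(definition.get_type_hint())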
"""
return self._name.infer().get_type_hint()
class Completion(BaseName):
"""
``Completion`` objects are returned from :meth:`.Script.complete`. They
provide additional information about a completion.
"""
def __init__(self, inference_state, name, stack, like_name_length,
is_fuzzy, cached_name=None):
super().__init__(inference_state, name)
self._like_name_length = like_name_length
self._stack = stack
self._is_fuzzy = is_fuzzy
self._cached_name = cached_name
# Completion objects with the same Completion name (which means
# duplicate items in the completion)
self._same_name_completions = []
def _complete(self, like_name):
append = ''
if settings.add_bracket_after_function \
and self.type == 'function':
append = '('
name = self._name.get_public_name()
if like_name:
name = name[self._like_name_length:]
return name + append
@property
def complete(self):
"""
Only works with non-fuzzy completions. Returns None if fuzzy
completions are used.
Return the rest of the word, e.g. completing ``isinstance``::
isinstan# <-- Cursor is here
would return the string 'ce'. It also adds additional stuff, depending
on your ``settings.py``.
Assuming the following function definition::
def foo(param=0):
pass
completing ``foo(par`` would give a ``Completion`` whose ``complete``
would be ``am=``.
"""
if self._is_fuzzy:
return None
return self._complete(True)
@property
def name_with_symbols(self):
"""
Similar to :attr:`.name`, but unlike :attr:`.name` it also returns
the symbols, for example assuming the following function definition::
def foo(param=0):
pass
completing ``foo(`` would give a ``Completion`` whose
``name_with_symbols`` would be ``param=``.
"""
return self._complete(False)
def docstring(self, raw=False, fast=True):
"""
Documented under :meth:`BaseName.docstring`.
"""
if self._like_name_length >= 3:
# In this case we can just resolve the like name, because we
# wouldn't load like > 100 Python modules anymore.
fast = False
return super().docstring(raw=raw, fast=fast)
def _get_docstring(self):
if self._cached_name is not None:
return completion_cache.get_docstring(
self._cached_name,
self._name.get_public_name(),
lambda: self._get_cache()
)
return super()._get_docstring()
def _get_docstring_signature(self):
if self._cached_name is not None:
return completion_cache.get_docstring_signature(
self._cached_name,
self._name.get_public_name(),
lambda: self._get_cache()
)
return super()._get_docstring_signature()
def _get_cache(self):
return (
super().type,
super()._get_docstring_signature(),
super()._get_docstring(),
)
@property
def type(self):
"""
Documented under :meth:`BaseName.type`.
"""
# Purely a speed optimization.
if self._cached_name is not None:
return completion_cache.get_type(
self._cached_name,
self._name.get_public_name(),
lambda: self._get_cache()
)
return super().type
def get_completion_prefix_length(self):
"""
Returns the length of the prefix being completed.
For example, completing ``isinstance``::
isinstan# <-- Cursor is here
would return 8, because len('isinstan') == 8.
Assuming the following function definition::
def foo(param=0):
pass
completing ``foo(par`` would return 3.
"""
return self._like_name_length
def __repr__(self):
return '<%s: %s>' % (type(self).__name__, self._name.get_public_name())
class Name(BaseName):
"""
*Name* objects are returned from many different APIs including
:meth:`.Script.goto` or :meth:`.Script.infer`.
"""
def __init__(self, inference_state, definition):
super().__init__(inference_state, definition)
@memoize_method
def defined_names(self):
"""
List sub-definitions (e.g., methods in a class).
:rtype: list of :class:`Name`
"""
defs = self._name.infer()
return sorted(
unite(defined_names(self._inference_state, d) for d in defs),
key=lambda s: s._name.start_pos or (0, 0)
)
def is_definition(self):
"""
Returns True, if defined as a name in a statement, function or class.
Returns False, if it's a reference to such a definition.
"""
if self._name.tree_name is None:
return True
else:
return self._name.tree_name.is_definition()
def __eq__(self, other):
return self._name.start_pos == other._name.start_pos \
and self.module_path == other.module_path \
and self.name == other.name \
and self._inference_state == other._inference_state
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self._name.start_pos, self.module_path, self.name, self._inference_state))
class BaseSignature(Name):
"""
These signatures are returned by :meth:`BaseName.get_signatures`
calls.
"""
def __init__(self, inference_state, signature):
super().__init__(inference_state, signature.name)
self._signature = signature
@property
def params(self):
"""
Returns definitions for all parameters that a signature defines.
This includes stuff like ``*args`` and ``**kwargs``.
:rtype: list of :class:`.ParamName`
"""
return [ParamName(self._inference_state, n)
for n in self._signature.get_param_names(resolve_stars=True)]
def to_string(self):
"""
Returns a text representation of the signature. This could for example
look like ``foo(bar, baz: int, **kwargs)``.
:rtype: str
"""
return self._signature.to_string()
class Signature(BaseSignature):
"""
A full signature object is the return value of
:meth:`.Script.get_signatures`.
"""
def __init__(self, inference_state, signature, call_details):
super().__init__(inference_state, signature)
self._call_details = call_details
self._signature = signature
@property
def index(self):
"""
Returns the param index of the current cursor position.
Returns None if the index cannot be found in the current call.
:rtype: int
"""
return self._call_details.calculate_index(
self._signature.get_param_names(resolve_stars=True)
)
@property
def bracket_start(self):
"""
Returns a line/column tuple of the bracket that is responsible for the
last function call. The first line is 1 and the first column 0.
:rtype: int, int
"""
return self._call_details.bracket_leaf.start_pos
def __repr__(self):
return '<%s: index=%r %s>' % (
type(self).__name__,
self.index,
self._signature.to_string(),
)
class ParamName(Name):
def infer_default(self):
"""
Returns default values like the ``1`` of ``def foo(x=1):``.
:rtype: list of :class:`.Name`
"""
return _values_to_definitions(self._name.infer_default())
def infer_annotation(self, **kwargs):
"""
:param execute_annotation: Default True; If False, values are not
executed and classes are returned instead of instances.
:rtype: list of :class:`.Name`
"""
return _values_to_definitions(self._name.infer_annotation(ignore_stars=True, **kwargs))
def to_string(self):
"""
Returns a simple representation of a param, like
``f: Callable[..., Any]``.
:rtype: str
"""
return self._name.to_string()
@property
def kind(self):
"""
Returns a kind value from :mod:`inspect`'s ``Parameter`` enum
(e.g. ``Parameter.KEYWORD_ONLY``).
:rtype: :py:attr:`inspect.Parameter.kind`
"""
return self._name.get_kind()

View File

@ -0,0 +1,666 @@
import re
from textwrap import dedent
from inspect import Parameter
from parso.python.token import PythonTokenTypes
from parso.python import tree
from parso.tree import search_ancestor, Leaf
from parso import split_lines
from jedi import debug
from jedi import settings
from jedi.api import classes
from jedi.api import helpers
from jedi.api import keywords
from jedi.api.strings import complete_dict
from jedi.api.file_name import complete_file_name
from jedi.inference import imports
from jedi.inference.base_value import ValueSet
from jedi.inference.helpers import infer_call_of_leaf, parse_dotted_names
from jedi.inference.context import get_global_filters
from jedi.inference.value import TreeInstance
from jedi.inference.docstring_utils import DocstringModule
from jedi.inference.names import ParamNameWrapper, SubModuleName
from jedi.inference.gradual.conversion import convert_values, convert_names
from jedi.parser_utils import cut_value_at_position
from jedi.plugins import plugin_manager
class ParamNameWithEquals(ParamNameWrapper):
def get_public_name(self):
return self.string_name + '='
def _get_signature_param_names(signatures, positional_count, used_kwargs):
# Add named params
for call_sig in signatures:
for i, p in enumerate(call_sig.params):
kind = p.kind
if i < positional_count and kind == Parameter.POSITIONAL_OR_KEYWORD:
continue
if kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY) \
and p.name not in used_kwargs:
yield ParamNameWithEquals(p._name)
def _must_be_kwarg(signatures, positional_count, used_kwargs):
if used_kwargs:
return True
must_be_kwarg = True
for signature in signatures:
for i, p in enumerate(signature.params):
kind = p.kind
if kind is Parameter.VAR_POSITIONAL:
# In case there were not already kwargs, the next param can
# always be a normal argument.
return False
if i >= positional_count and kind in (Parameter.POSITIONAL_OR_KEYWORD,
Parameter.POSITIONAL_ONLY):
must_be_kwarg = False
break
if not must_be_kwarg:
break
return must_be_kwarg
def filter_names(inference_state, completion_names, stack, like_name, fuzzy, cached_name):
comp_dct = set()
if settings.case_insensitive_completion:
like_name = like_name.lower()
for name in completion_names:
string = name.string_name
if settings.case_insensitive_completion:
string = string.lower()
if helpers.match(string, like_name, fuzzy=fuzzy):
new = classes.Completion(
inference_state,
name,
stack,
len(like_name),
is_fuzzy=fuzzy,
cached_name=cached_name,
)
k = (new.name, new.complete) # key
if k not in comp_dct:
comp_dct.add(k)
tree_name = name.tree_name
if tree_name is not None:
definition = tree_name.get_definition()
if definition is not None and definition.type == 'del_stmt':
continue
yield new
def _remove_duplicates(completions, other_completions):
names = {d.name for d in other_completions}
return [c for c in completions if c.name not in names]
def get_user_context(module_context, position):
"""
Returns the scope in which the user resides. This includes flows.
"""
leaf = module_context.tree_node.get_leaf_for_position(position, include_prefixes=True)
return module_context.create_context(leaf)
def get_flow_scope_node(module_node, position):
node = module_node.get_leaf_for_position(position, include_prefixes=True)
while not isinstance(node, (tree.Scope, tree.Flow)):
node = node.parent
return node
@plugin_manager.decorate()
def complete_param_names(context, function_name, decorator_nodes):
# Basically there's no way to do param completion. The plugins are
# responsible for this.
return []
class Completion:
def __init__(self, inference_state, module_context, code_lines, position,
signatures_callback, fuzzy=False):
self._inference_state = inference_state
self._module_context = module_context
self._module_node = module_context.tree_node
self._code_lines = code_lines
# The first step of completions is to get the name
self._like_name = helpers.get_on_completion_name(self._module_node, code_lines, position)
# The actual cursor position is not what we need to calculate
# everything. We want the start of the name we're on.
self._original_position = position
self._signatures_callback = signatures_callback
self._fuzzy = fuzzy
def complete(self):
leaf = self._module_node.get_leaf_for_position(
self._original_position,
include_prefixes=True
)
string, start_leaf, quote = _extract_string_while_in_string(leaf, self._original_position)
prefixed_completions = complete_dict(
self._module_context,
self._code_lines,
start_leaf or leaf,
self._original_position,
None if string is None else quote + string,
fuzzy=self._fuzzy,
)
if string is not None and not prefixed_completions:
prefixed_completions = list(complete_file_name(
self._inference_state, self._module_context, start_leaf, quote, string,
self._like_name, self._signatures_callback,
self._code_lines, self._original_position,
self._fuzzy
))
if string is not None:
if not prefixed_completions and '\n' in string:
# Complete only multi line strings
prefixed_completions = self._complete_in_string(start_leaf, string)
return prefixed_completions
cached_name, completion_names = self._complete_python(leaf)
completions = list(filter_names(self._inference_state, completion_names,
self.stack, self._like_name,
self._fuzzy, cached_name=cached_name))
return (
# Removing duplicates mostly to remove False/True/None duplicates.
_remove_duplicates(prefixed_completions, completions)
+ sorted(completions, key=lambda x: (x.name.startswith('__'),
x.name.startswith('_'),
x.name.lower()))
)
def _complete_python(self, leaf):
"""
Analyzes the current context of a completion and decides what to
return.
Technically this works by generating a parser stack and analysing the
current stack for possible grammar nodes.
Possible enhancements:
- global/nonlocal search global
- yield from / raise from <- could be only exceptions/generators
- In args: */**: no completion
- In params (also lambda): no completion before =
"""
grammar = self._inference_state.grammar
self.stack = stack = None
self._position = (
self._original_position[0],
self._original_position[1] - len(self._like_name)
)
cached_name = None
try:
self.stack = stack = helpers.get_stack_at_position(
grammar, self._code_lines, leaf, self._position
)
except helpers.OnErrorLeaf as e:
value = e.error_leaf.value
if value == '.':
# After ErrorLeaf's that are dots, we will not do any
# completions since this probably just confuses the user.
return cached_name, []
# If we don't have a value, just use global completion.
return cached_name, self._complete_global_scope()
allowed_transitions = \
list(stack._allowed_transition_names_and_token_types())
if 'if' in allowed_transitions:
leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True)
previous_leaf = leaf.get_previous_leaf()
indent = self._position[1]
if not (leaf.start_pos <= self._position <= leaf.end_pos):
indent = leaf.start_pos[1]
if previous_leaf is not None:
stmt = previous_leaf
while True:
stmt = search_ancestor(
stmt, 'if_stmt', 'for_stmt', 'while_stmt', 'try_stmt',
'error_node',
)
if stmt is None:
break
type_ = stmt.type
if type_ == 'error_node':
first = stmt.children[0]
if isinstance(first, Leaf):
type_ = first.value + '_stmt'
# Compare indents
if stmt.start_pos[1] == indent:
if type_ == 'if_stmt':
allowed_transitions += ['elif', 'else']
elif type_ == 'try_stmt':
allowed_transitions += ['except', 'finally', 'else']
elif type_ == 'for_stmt':
allowed_transitions.append('else')
completion_names = []
kwargs_only = False
if any(t in allowed_transitions for t in (PythonTokenTypes.NAME,
PythonTokenTypes.INDENT)):
# This means that we actually have to do type inference.
nonterminals = [stack_node.nonterminal for stack_node in stack]
nodes = _gather_nodes(stack)
if nodes and nodes[-1] in ('as', 'def', 'class'):
# No completions for ``with x as foo`` and ``import x as foo``.
# Also true for defining names as a class or function.
return cached_name, list(self._complete_inherited(is_function=True))
elif "import_stmt" in nonterminals:
level, names = parse_dotted_names(nodes, "import_from" in nonterminals)
only_modules = not ("import_from" in nonterminals and 'import' in nodes)
completion_names += self._get_importer_names(
names,
level,
only_modules=only_modules,
)
elif nonterminals[-1] in ('trailer', 'dotted_name') and nodes[-1] == '.':
dot = self._module_node.get_leaf_for_position(self._position)
if dot.type == "endmarker":
# This is a bit of a weird edge case, maybe we can somehow
# generalize this.
dot = leaf.get_previous_leaf()
cached_name, n = self._complete_trailer(dot.get_previous_leaf())
completion_names += n
elif self._is_parameter_completion():
completion_names += self._complete_params(leaf)
else:
# Apparently this looks like it's good enough to filter most cases
# so that signature completions don't randomly appear.
# To understand why this works, three things are important:
# 1. trailer with a `,` in it is either a subscript or an arglist.
# 2. If there's no `,`, it's at the start and only signatures start
# with `(`. Other trailers could start with `.` or `[`.
# 3. Decorators are very primitive and have an optional `(` with
# optional arglist in them.
if nodes[-1] in ['(', ','] \
and nonterminals[-1] in ('trailer', 'arglist', 'decorator'):
signatures = self._signatures_callback(*self._position)
if signatures:
call_details = signatures[0]._call_details
used_kwargs = list(call_details.iter_used_keyword_arguments())
positional_count = call_details.count_positional_arguments()
completion_names += _get_signature_param_names(
signatures,
positional_count,
used_kwargs,
)
kwargs_only = _must_be_kwarg(signatures, positional_count, used_kwargs)
if not kwargs_only:
completion_names += self._complete_global_scope()
completion_names += self._complete_inherited(is_function=False)
if not kwargs_only:
current_line = self._code_lines[self._position[0] - 1][:self._position[1]]
completion_names += self._complete_keywords(
allowed_transitions,
only_values=not (not current_line or current_line[-1] in ' \t.;'
and current_line[-3:] != '...')
)
return cached_name, completion_names
def _is_parameter_completion(self):
tos = self.stack[-1]
if tos.nonterminal == 'lambdef' and len(tos.nodes) == 1:
# We are at the position `lambda `, where basically the next node
# is a param.
return True
if tos.nonterminal in 'parameters':
# Basically we are at the position `foo(`, there's nothing there
# yet, so we have no `typedargslist`.
return True
# var args is for lambdas and typed args for normal functions
return tos.nonterminal in ('typedargslist', 'varargslist') and tos.nodes[-1] == ','
def _complete_params(self, leaf):
stack_node = self.stack[-2]
if stack_node.nonterminal == 'parameters':
stack_node = self.stack[-3]
if stack_node.nonterminal == 'funcdef':
context = get_user_context(self._module_context, self._position)
node = search_ancestor(leaf, 'error_node', 'funcdef')
if node is not None:
if node.type == 'error_node':
n = node.children[0]
if n.type == 'decorators':
decorators = n.children
elif n.type == 'decorator':
decorators = [n]
else:
decorators = []
else:
decorators = node.get_decorators()
function_name = stack_node.nodes[1]
return complete_param_names(context, function_name.value, decorators)
return []
def _complete_keywords(self, allowed_transitions, only_values):
for k in allowed_transitions:
if isinstance(k, str) and k.isalpha():
if not only_values or k in ('True', 'False', 'None'):
yield keywords.KeywordName(self._inference_state, k)
def _complete_global_scope(self):
context = get_user_context(self._module_context, self._position)
debug.dbg('global completion scope: %s', context)
flow_scope_node = get_flow_scope_node(self._module_node, self._position)
filters = get_global_filters(
context,
self._position,
flow_scope_node
)
completion_names = []
for filter in filters:
completion_names += filter.values()
return completion_names
def _complete_trailer(self, previous_leaf):
inferred_context = self._module_context.create_context(previous_leaf)
values = infer_call_of_leaf(inferred_context, previous_leaf)
debug.dbg('trailer completion values: %s', values, color='MAGENTA')
# The cached name simply exists to make speed optimizations for certain
# modules.
cached_name = None
if len(values) == 1:
v, = values
if v.is_module():
if len(v.string_names) == 1:
module_name = v.string_names[0]
if module_name in ('numpy', 'tensorflow', 'matplotlib', 'pandas'):
cached_name = module_name
return cached_name, self._complete_trailer_for_values(values)
def _complete_trailer_for_values(self, values):
user_context = get_user_context(self._module_context, self._position)
return complete_trailer(user_context, values)
def _get_importer_names(self, names, level=0, only_modules=True):
names = [n.value for n in names]
i = imports.Importer(self._inference_state, names, self._module_context, level)
return i.completion_names(self._inference_state, only_modules=only_modules)
def _complete_inherited(self, is_function=True):
"""
Autocomplete inherited methods when overriding in child class.
"""
leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True)
cls = tree.search_ancestor(leaf, 'classdef')
if cls is None:
return
# Complete the methods that are defined in the super classes.
class_value = self._module_context.create_value(cls)
if cls.start_pos[1] >= leaf.start_pos[1]:
return
filters = class_value.get_filters(is_instance=True)
# The first dict is the dictionary of the class itself.
next(filters)
for filter in filters:
for name in filter.values():
# TODO we should probably check here for properties
if (name.api_type == 'function') == is_function:
yield name
def _complete_in_string(self, start_leaf, string):
"""
To make it possible for people to have completions in doctests or
generally in "Python" code in docstrings, we use the following
heuristic:
- Having an indented block of code
- Having some doctest code that starts with `>>>`
- Having backticks that don't have whitespace inside them
"""
def iter_relevant_lines(lines):
include_next_line = False
for l in code_lines:
if include_next_line or l.startswith('>>>') or l.startswith(' '):
yield re.sub(r'^( *>>> ?| +)', '', l)
else:
yield None
include_next_line = bool(re.match(' *>>>', l))
string = dedent(string)
code_lines = split_lines(string, keepends=True)
relevant_code_lines = list(iter_relevant_lines(code_lines))
if relevant_code_lines[-1] is not None:
# Some code lines might be None, therefore get rid of that.
relevant_code_lines = ['\n' if c is None else c for c in relevant_code_lines]
return self._complete_code_lines(relevant_code_lines)
match = re.search(r'`([^`\s]+)', code_lines[-1])
if match:
return self._complete_code_lines([match.group(1)])
return []
def _complete_code_lines(self, code_lines):
module_node = self._inference_state.grammar.parse(''.join(code_lines))
module_value = DocstringModule(
in_module_context=self._module_context,
inference_state=self._inference_state,
module_node=module_node,
code_lines=code_lines,
)
return Completion(
self._inference_state,
module_value.as_context(),
code_lines=code_lines,
position=module_node.end_pos,
signatures_callback=lambda *args, **kwargs: [],
fuzzy=self._fuzzy
).complete()
def _gather_nodes(stack):
nodes = []
for stack_node in stack:
if stack_node.dfa.from_rule == 'small_stmt':
nodes = []
else:
nodes += stack_node.nodes
return nodes
_string_start = re.compile(r'^\w*(\'{3}|"{3}|\'|")')
def _extract_string_while_in_string(leaf, position):
def return_part_of_leaf(leaf):
kwargs = {}
if leaf.line == position[0]:
kwargs['endpos'] = position[1] - leaf.column
match = _string_start.match(leaf.value, **kwargs)
if not match:
return None, None, None
start = match.group(0)
if leaf.line == position[0] and position[1] < leaf.column + match.end():
return None, None, None
return cut_value_at_position(leaf, position)[match.end():], leaf, start
if position < leaf.start_pos:
return None, None, None
if leaf.type == 'string':
return return_part_of_leaf(leaf)
leaves = []
while leaf is not None:
if leaf.type == 'error_leaf' and ('"' in leaf.value or "'" in leaf.value):
if len(leaf.value) > 1:
return return_part_of_leaf(leaf)
prefix_leaf = None
if not leaf.prefix:
prefix_leaf = leaf.get_previous_leaf()
if prefix_leaf is None or prefix_leaf.type != 'name' \
or not all(c in 'rubf' for c in prefix_leaf.value.lower()):
prefix_leaf = None
return (
''.join(cut_value_at_position(l, position) for l in leaves),
prefix_leaf or leaf,
('' if prefix_leaf is None else prefix_leaf.value)
+ cut_value_at_position(leaf, position),
)
if leaf.line != position[0]:
# Multi line strings are always simple error leaves and contain the
# whole string, single line error leaves are therefore important
# now and since the line is different, it's not really a single
# line string anymore.
break
leaves.insert(0, leaf)
leaf = leaf.get_previous_leaf()
return None, None, None
def complete_trailer(user_context, values):
completion_names = []
for value in values:
for filter in value.get_filters(origin_scope=user_context.tree_node):
completion_names += filter.values()
if not value.is_stub() and isinstance(value, TreeInstance):
completion_names += _complete_getattr(user_context, value)
python_values = convert_values(values)
for c in python_values:
if c not in values:
for filter in c.get_filters(origin_scope=user_context.tree_node):
completion_names += filter.values()
return completion_names
def _complete_getattr(user_context, instance):
"""
A heuristic to make completion for proxy objects work. This is not
intended to work in all cases. It works exactly in this case:
def __getattr__(self, name):
...
return getattr(any_object, name)
It is important that the return contains getattr directly, otherwise it
won't work anymore. It's really just a stupid heuristic. It will not
work if you write e.g. `return (getattr(o, name))`, because of the
additional parentheses. It will also not work if you move the getattr
to some other place that is not the return statement itself.
It is intentional that it doesn't work in all cases. Generally it's
really hard to do even this case (as you can see below). Most people
will write it like this anyway and the other ones, well they are just
out of luck I guess :) ~dave.
"""
names = (instance.get_function_slot_names('__getattr__')
or instance.get_function_slot_names('__getattribute__'))
functions = ValueSet.from_sets(
name.infer()
for name in names
)
for func in functions:
tree_node = func.tree_node
if tree_node is None or tree_node.type != 'funcdef':
continue
for return_stmt in tree_node.iter_return_stmts():
# Basically until the next comment we just try to find out if a
# return statement looks exactly like `return getattr(x, name)`.
if return_stmt.type != 'return_stmt':
continue
atom_expr = return_stmt.children[1]
if atom_expr.type != 'atom_expr':
continue
atom = atom_expr.children[0]
trailer = atom_expr.children[1]
if len(atom_expr.children) != 2 or atom.type != 'name' \
or atom.value != 'getattr':
continue
arglist = trailer.children[1]
if arglist.type != 'arglist' or len(arglist.children) < 3:
continue
context = func.as_context()
object_node = arglist.children[0]
# Make sure it's a param: foo in __getattr__(self, foo)
name_node = arglist.children[2]
name_list = context.goto(name_node, name_node.start_pos)
if not any(n.api_type == 'param' for n in name_list):
continue
# Now that we know that these are most probably completion
# objects, we just infer the object and return them as
# completions.
objects = context.infer_node(object_node)
return complete_trailer(user_context, objects)
return []
def search_in_module(inference_state, module_context, names, wanted_names,
wanted_type, complete=False, fuzzy=False,
ignore_imports=False, convert=False):
for s in wanted_names[:-1]:
new_names = []
for n in names:
if s == n.string_name:
if n.tree_name is not None and n.api_type in ('module', 'namespace') \
and ignore_imports:
continue
new_names += complete_trailer(
module_context,
n.infer()
)
debug.dbg('dot lookup on search %s from %s', new_names, names[:10])
names = new_names
last_name = wanted_names[-1].lower()
for n in names:
string = n.string_name.lower()
if complete and helpers.match(string, last_name, fuzzy=fuzzy) \
or not complete and string == last_name:
if isinstance(n, SubModuleName):
names = [v.name for v in n.infer()]
else:
names = [n]
if convert:
names = convert_names(names)
for n2 in names:
if complete:
def_ = classes.Completion(
inference_state, n2,
stack=None,
like_name_length=len(last_name),
is_fuzzy=fuzzy,
)
else:
def_ = classes.Name(inference_state, n2)
if not wanted_type or wanted_type == def_.type:
yield def_

View File

@ -0,0 +1,31 @@
from typing import Dict, Tuple, Callable
CacheValues = Tuple[str, str, str]
CacheValuesCallback = Callable[[], CacheValues]
_cache: Dict[str, Dict[str, CacheValues]] = {}
def save_entry(module_name: str, name: str, cache: CacheValues) -> None:
try:
module_cache = _cache[module_name]
except KeyError:
module_cache = _cache[module_name] = {}
module_cache[name] = cache
def _create_get_from_cache(number: int) -> Callable[[str, str, CacheValuesCallback], str]:
def _get_from_cache(module_name: str, name: str, get_cache_values: CacheValuesCallback) -> str:
try:
return _cache[module_name][name][number]
except KeyError:
v = get_cache_values()
save_entry(module_name, name, v)
return v[number]
return _get_from_cache
get_type = _create_get_from_cache(0)
get_docstring_signature = _create_get_from_cache(1)
get_docstring = _create_get_from_cache(2)

View File

@ -0,0 +1,467 @@
"""
Environments are a way to activate different Python versions or Virtualenvs for
static analysis. The Python binary in that environment is going to be executed.
"""
import os
import sys
import hashlib
import filecmp
from collections import namedtuple
from shutil import which
from jedi.cache import memoize_method, time_cache
from jedi.inference.compiled.subprocess import CompiledSubprocess, \
InferenceStateSameProcess, InferenceStateSubprocess
import parso
_VersionInfo = namedtuple('VersionInfo', 'major minor micro')
_SUPPORTED_PYTHONS = ['3.10', '3.9', '3.8', '3.7', '3.6']
_SAFE_PATHS = ['/usr/bin', '/usr/local/bin']
_CONDA_VAR = 'CONDA_PREFIX'
_CURRENT_VERSION = '%s.%s' % (sys.version_info.major, sys.version_info.minor)
class InvalidPythonEnvironment(Exception):
"""
If you see this exception, the Python executable or Virtualenv you have
been trying to use is probably not a correct Python version.
"""
class _BaseEnvironment:
@memoize_method
def get_grammar(self):
version_string = '%s.%s' % (self.version_info.major, self.version_info.minor)
return parso.load_grammar(version=version_string)
@property
def _sha256(self):
try:
return self._hash
except AttributeError:
self._hash = _calculate_sha256_for_file(self.executable)
return self._hash
def _get_info():
return (
sys.executable,
sys.prefix,
sys.version_info[:3],
)
class Environment(_BaseEnvironment):
"""
This class is supposed to be created by internal Jedi architecture. You
should not create it directly. Please use create_environment or the other
functions instead. It is then returned by that function.
"""
_subprocess = None
def __init__(self, executable, env_vars=None):
self._start_executable = executable
self._env_vars = env_vars
# Initialize the environment
self._get_subprocess()
def _get_subprocess(self):
if self._subprocess is not None and not self._subprocess.is_crashed:
return self._subprocess
try:
self._subprocess = CompiledSubprocess(self._start_executable,
env_vars=self._env_vars)
info = self._subprocess._send(None, _get_info)
except Exception as exc:
raise InvalidPythonEnvironment(
"Could not get version information for %r: %r" % (
self._start_executable,
exc))
# Since it could change and might not be the same(?) as the one given,
# set it here.
self.executable = info[0]
"""
The Python executable, matches ``sys.executable``.
"""
self.path = info[1]
"""
The path to an environment, matches ``sys.prefix``.
"""
self.version_info = _VersionInfo(*info[2])
"""
Like :data:`sys.version_info`: a tuple to show the current
Environment's Python version.
"""
return self._subprocess
def __repr__(self):
version = '.'.join(str(i) for i in self.version_info)
return '<%s: %s in %s>' % (self.__class__.__name__, version, self.path)
def get_inference_state_subprocess(self, inference_state):
return InferenceStateSubprocess(inference_state, self._get_subprocess())
@memoize_method
def get_sys_path(self):
"""
The sys path for this environment. Does not include potential
modifications from e.g. appending to :data:`sys.path`.
:returns: list of str
"""
# It's pretty much impossible to generate the sys path without actually
# executing Python. The sys path (when starting with -S) itself depends
# on how the Python version was compiled (ENV variables).
# If you omit -S when starting Python (normal case), additionally
# site.py gets executed.
return self._get_subprocess().get_sys_path()
class _SameEnvironmentMixin:
def __init__(self):
self._start_executable = self.executable = sys.executable
self.path = sys.prefix
self.version_info = _VersionInfo(*sys.version_info[:3])
self._env_vars = None
class SameEnvironment(_SameEnvironmentMixin, Environment):
pass
class InterpreterEnvironment(_SameEnvironmentMixin, _BaseEnvironment):
def get_inference_state_subprocess(self, inference_state):
return InferenceStateSameProcess(inference_state)
def get_sys_path(self):
return sys.path
def _get_virtual_env_from_var(env_var='VIRTUAL_ENV'):
"""Get virtualenv environment from VIRTUAL_ENV environment variable.
It uses `safe=False` with ``create_environment``, because the environment
variable is considered to be safe / controlled by the user solely.
"""
var = os.environ.get(env_var)
if var:
# Under macOS in some cases - notably when using Pipenv - the
# sys.prefix of the virtualenv is /path/to/env/bin/.. instead of
# /path/to/env so we need to fully resolve the paths in order to
# compare them.
if os.path.realpath(var) == os.path.realpath(sys.prefix):
return _try_get_same_env()
try:
return create_environment(var, safe=False)
except InvalidPythonEnvironment:
pass
def _calculate_sha256_for_file(path):
sha256 = hashlib.sha256()
with open(path, 'rb') as f:
for block in iter(lambda: f.read(filecmp.BUFSIZE), b''):
sha256.update(block)
return sha256.hexdigest()
def get_default_environment():
"""
Tries to return an active Virtualenv or conda environment.
    If there is no VIRTUAL_ENV variable or no CONDA_PREFIX variable set,
    it will return the latest Python version installed on the system. This
makes it possible to use as many new Python features as possible when using
autocompletion and other functionality.
:returns: :class:`.Environment`
"""
virtual_env = _get_virtual_env_from_var()
if virtual_env is not None:
return virtual_env
conda_env = _get_virtual_env_from_var(_CONDA_VAR)
if conda_env is not None:
return conda_env
return _try_get_same_env()
def _try_get_same_env():
env = SameEnvironment()
if not os.path.basename(env.executable).lower().startswith('python'):
# This tries to counter issues with embedding. In some cases (e.g.
        # VIM's Python on Mac/Windows), sys.executable is /foo/bar/vim. This
# happens, because for Mac a function called `_NSGetExecutablePath` is
# used and for Windows `GetModuleFileNameW`. These are both platform
# specific functions. For all other systems sys.executable should be
# alright. However here we try to generalize:
#
# 1. Check if the executable looks like python (heuristic)
# 2. In case it's not try to find the executable
# 3. In case we don't find it use an interpreter environment.
#
# The last option will always work, but leads to potential crashes of
# Jedi - which is ok, because it happens very rarely and even less,
# because the code below should work for most cases.
if os.name == 'nt':
# The first case would be a virtualenv and the second a normal
# Python installation.
checks = (r'Scripts\python.exe', 'python.exe')
else:
# For unix it looks like Python is always in a bin folder.
checks = (
                'bin/python%s.%s' % (sys.version_info[0], sys.version_info[1]),
'bin/python%s' % (sys.version_info[0]),
'bin/python',
)
for check in checks:
guess = os.path.join(sys.exec_prefix, check)
if os.path.isfile(guess):
# Bingo - We think we have our Python.
return Environment(guess)
# It looks like there is no reasonable Python to be found.
return InterpreterEnvironment()
# If no virtualenv is found, use the environment we're already
# using.
return env
def get_cached_default_environment():
var = os.environ.get('VIRTUAL_ENV') or os.environ.get(_CONDA_VAR)
environment = _get_cached_default_environment()
# Under macOS in some cases - notably when using Pipenv - the
# sys.prefix of the virtualenv is /path/to/env/bin/.. instead of
# /path/to/env so we need to fully resolve the paths in order to
# compare them.
if var and os.path.realpath(var) != os.path.realpath(environment.path):
_get_cached_default_environment.clear_cache()
return _get_cached_default_environment()
return environment
@time_cache(seconds=10 * 60) # 10 Minutes
def _get_cached_default_environment():
try:
return get_default_environment()
except InvalidPythonEnvironment:
# It's possible that `sys.executable` is wrong. Typically happens
# when Jedi is used in an executable that embeds Python. For further
# information, have a look at:
# https://github.com/davidhalter/jedi/issues/1531
return InterpreterEnvironment()
def find_virtualenvs(paths=None, *, safe=True, use_environment_vars=True):
"""
:param paths: A list of paths in your file system to be scanned for
Virtualenvs. It will search in these paths and potentially execute the
Python binaries.
:param safe: Default True. In case this is False, it will allow this
function to execute potential `python` environments. An attacker might
be able to drop an executable in a path this function is searching by
default. If the executable has not been installed by root, it will not
be executed.
:param use_environment_vars: Default True. If True, the VIRTUAL_ENV
variable will be checked if it contains a valid VirtualEnv.
CONDA_PREFIX will be checked to see if it contains a valid conda
environment.
:yields: :class:`.Environment`
"""
if paths is None:
paths = []
_used_paths = set()
if use_environment_vars:
# Using this variable should be safe, because attackers might be
# able to drop files (via git) but not environment variables.
virtual_env = _get_virtual_env_from_var()
if virtual_env is not None:
yield virtual_env
_used_paths.add(virtual_env.path)
conda_env = _get_virtual_env_from_var(_CONDA_VAR)
if conda_env is not None:
yield conda_env
_used_paths.add(conda_env.path)
for directory in paths:
if not os.path.isdir(directory):
continue
directory = os.path.abspath(directory)
for path in os.listdir(directory):
path = os.path.join(directory, path)
if path in _used_paths:
# A path shouldn't be inferred twice.
continue
_used_paths.add(path)
try:
executable = _get_executable_path(path, safe=safe)
yield Environment(executable)
except InvalidPythonEnvironment:
pass
def find_system_environments(*, env_vars=None):
"""
Ignores virtualenvs and returns the Python versions that were installed on
your system. This might return nothing, if you're running Python e.g. from
a portable version.
The environments are sorted from latest to oldest Python version.
:yields: :class:`.Environment`
"""
for version_string in _SUPPORTED_PYTHONS:
try:
yield get_system_environment(version_string, env_vars=env_vars)
except InvalidPythonEnvironment:
pass
# TODO: this function should probably return a list of environments since
# multiple Python installations can be found on a system for the same version.
def get_system_environment(version, *, env_vars=None):
"""
Return the first Python environment found for a string of the form 'X.Y'
where X and Y are the major and minor versions of Python.
:raises: :exc:`.InvalidPythonEnvironment`
:returns: :class:`.Environment`
"""
exe = which('python' + version)
if exe:
if exe == sys.executable:
return SameEnvironment()
return Environment(exe)
if os.name == 'nt':
for exe in _get_executables_from_windows_registry(version):
try:
return Environment(exe, env_vars=env_vars)
except InvalidPythonEnvironment:
pass
raise InvalidPythonEnvironment("Cannot find executable python%s." % version)
def create_environment(path, *, safe=True, env_vars=None):
"""
Make it possible to manually create an Environment object by specifying a
Virtualenv path or an executable path and optional environment variables.
:raises: :exc:`.InvalidPythonEnvironment`
:returns: :class:`.Environment`
"""
if os.path.isfile(path):
_assert_safe(path, safe)
return Environment(path, env_vars=env_vars)
return Environment(_get_executable_path(path, safe=safe), env_vars=env_vars)
def _get_executable_path(path, safe=True):
"""
    Raises InvalidPythonEnvironment if ``path`` is not actually a virtualenv.
"""
if os.name == 'nt':
python = os.path.join(path, 'Scripts', 'python.exe')
else:
python = os.path.join(path, 'bin', 'python')
if not os.path.exists(python):
raise InvalidPythonEnvironment("%s seems to be missing." % python)
_assert_safe(python, safe)
return python
def _get_executables_from_windows_registry(version):
# https://github.com/python/typeshed/pull/3794 adds winreg
import winreg # type: ignore[import]
# TODO: support Python Anaconda.
sub_keys = [
r'SOFTWARE\Python\PythonCore\{version}\InstallPath',
r'SOFTWARE\Wow6432Node\Python\PythonCore\{version}\InstallPath',
r'SOFTWARE\Python\PythonCore\{version}-32\InstallPath',
r'SOFTWARE\Wow6432Node\Python\PythonCore\{version}-32\InstallPath'
]
for root_key in [winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE]:
for sub_key in sub_keys:
sub_key = sub_key.format(version=version)
try:
with winreg.OpenKey(root_key, sub_key) as key:
prefix = winreg.QueryValueEx(key, '')[0]
exe = os.path.join(prefix, 'python.exe')
if os.path.isfile(exe):
yield exe
except WindowsError:
pass
def _assert_safe(executable_path, safe):
if safe and not _is_safe(executable_path):
raise InvalidPythonEnvironment(
"The python binary is potentially unsafe.")
def _is_safe(executable_path):
# Resolve sym links. A venv typically is a symlink to a known Python
# binary. Only virtualenvs copy symlinks around.
real_path = os.path.realpath(executable_path)
if _is_unix_safe_simple(real_path):
return True
# Just check the list of known Python versions. If it's not in there,
# it's likely an attacker or some Python that was not properly
# installed in the system.
for environment in find_system_environments():
if environment.executable == real_path:
return True
# If the versions don't match, just compare the binary files. If we
# don't do that, only venvs will be working and not virtualenvs.
# venvs are symlinks while virtualenvs are actual copies of the
# Python files.
# This still means that if the system Python is updated and the
# virtualenv's Python is not (which is probably never going to get
# upgraded), it will not work with Jedi. IMO that's fine, because
# people should just be using venv. ~ dave
if environment._sha256 == _calculate_sha256_for_file(real_path):
return True
return False
def _is_unix_safe_simple(real_path):
if _is_unix_admin():
# In case we are root, just be conservative and
# only execute known paths.
return any(real_path.startswith(p) for p in _SAFE_PATHS)
uid = os.stat(real_path).st_uid
# The interpreter needs to be owned by root. This means that it wasn't
# written by a user and therefore attacking Jedi is not as simple.
# The attack could look like the following:
# 1. A user clones a repository.
# 2. The repository has an innocent looking folder called foobar. jedi
# searches for the folder and executes foobar/bin/python --version if
# there's also a foobar/bin/activate.
# 3. The attacker has gained code execution, since he controls
# foobar/bin/python.
return uid == 0
def _is_unix_admin():
try:
return os.getuid() == 0
except AttributeError:
return False # Windows
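# A short sketch of the public entry points backed by this module, assuming a
# standard `import jedi`; the paths below are placeholders.
import jedi

# Pick up VIRTUAL_ENV / CONDA_PREFIX, or fall back to the running interpreter.
env = jedi.get_default_environment()
print(env.executable, env.version_info)

# Enumerate system interpreters (newest first) and virtualenvs in given folders.
for e in jedi.find_system_environments():
    print(e)
for e in jedi.find_virtualenvs(['/tmp/envs']):           # placeholder directory
    print(e)

# Explicitly create an environment and hand it to a Script. safe=False skips
# the root-ownership / known-binary checks implemented above.
env = jedi.create_environment('/tmp/envs/venv', safe=False)    # placeholder path
source = 'import collections; collections.'
print(jedi.Script(source, environment=env).complete(1, len(source)))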

View File

@ -0,0 +1,46 @@
"""
This file is about errors in Python files and not about exception handling in
Jedi.
"""
def parso_to_jedi_errors(grammar, module_node):
return [SyntaxError(e) for e in grammar.iter_errors(module_node)]
class SyntaxError:
"""
Syntax errors are generated by :meth:`.Script.get_syntax_errors`.
"""
def __init__(self, parso_error):
self._parso_error = parso_error
@property
def line(self):
"""The line where the error starts (starting with 1)."""
return self._parso_error.start_pos[0]
@property
def column(self):
"""The column where the error starts (starting with 0)."""
return self._parso_error.start_pos[1]
@property
def until_line(self):
"""The line where the error ends (starting with 1)."""
return self._parso_error.end_pos[0]
@property
def until_column(self):
"""The column where the error ends (starting with 0)."""
return self._parso_error.end_pos[1]
def get_message(self):
return self._parso_error.message
def __repr__(self):
return '<%s from=%s to=%s>' % (
self.__class__.__name__,
self._parso_error.start_pos,
self._parso_error.end_pos,
)
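# These objects surface through Script.get_syntax_errors(); a minimal sketch,
# assuming a standard `import jedi`.
import jedi

broken = "def broken(:\n    pass\n"
for error in jedi.Script(broken).get_syntax_errors():
    # line/until_line are 1-based, column/until_column are 0-based.
    print(error.line, error.column, error.until_line, error.until_column,
          error.get_message())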

View File

@ -0,0 +1,31 @@
class _JediError(Exception):
pass
class InternalError(_JediError):
"""
    This error might happen if a subprocess is crashing. The reason for this is
usually broken C code in third party libraries. This is not a very common
thing and it is safe to use Jedi again. However using the same calls might
result in the same error again.
"""
class WrongVersion(_JediError):
"""
This error is reserved for the future, shouldn't really be happening at the
moment.
"""
class RefactoringError(_JediError):
"""
Refactorings can fail for various reasons. So if you work with refactorings
like :meth:`.Script.rename`, :meth:`.Script.inline`,
:meth:`.Script.extract_variable` and :meth:`.Script.extract_function`, make
    sure to catch these. The descriptions in the errors are usually valuable
for end users.
A typical ``RefactoringError`` would tell the user that inlining is not
possible if no name is under the cursor.
"""

View File

@ -0,0 +1,155 @@
import os
from jedi.api import classes
from jedi.api.strings import StringName, get_quote_ending
from jedi.api.helpers import match
from jedi.inference.helpers import get_str_or_none
class PathName(StringName):
api_type = 'path'
def complete_file_name(inference_state, module_context, start_leaf, quote, string,
like_name, signatures_callback, code_lines, position, fuzzy):
# First we want to find out what can actually be changed as a name.
like_name_length = len(os.path.basename(string))
addition = _get_string_additions(module_context, start_leaf)
if string.startswith('~'):
string = os.path.expanduser(string)
if addition is None:
return
string = addition + string
# Here we use basename again, because if strings are added like
# `'foo' + 'bar`, it should complete to `foobar/`.
must_start_with = os.path.basename(string)
string = os.path.dirname(string)
sigs = signatures_callback(*position)
is_in_os_path_join = sigs and all(s.full_name == 'os.path.join' for s in sigs)
if is_in_os_path_join:
to_be_added = _add_os_path_join(module_context, start_leaf, sigs[0].bracket_start)
if to_be_added is None:
is_in_os_path_join = False
else:
string = to_be_added + string
base_path = os.path.join(inference_state.project.path, string)
try:
listed = sorted(os.scandir(base_path), key=lambda e: e.name)
# OSError: [Errno 36] File name too long: '...'
except (FileNotFoundError, OSError):
return
quote_ending = get_quote_ending(quote, code_lines, position)
for entry in listed:
name = entry.name
if match(name, must_start_with, fuzzy=fuzzy):
if is_in_os_path_join or not entry.is_dir():
name += quote_ending
else:
name += os.path.sep
yield classes.Completion(
inference_state,
PathName(inference_state, name[len(must_start_with) - like_name_length:]),
stack=None,
like_name_length=like_name_length,
is_fuzzy=fuzzy,
)
def _get_string_additions(module_context, start_leaf):
def iterate_nodes():
node = addition.parent
was_addition = True
for child_node in reversed(node.children[:node.children.index(addition)]):
if was_addition:
was_addition = False
yield child_node
continue
if child_node != '+':
break
was_addition = True
addition = start_leaf.get_previous_leaf()
if addition != '+':
return ''
context = module_context.create_context(start_leaf)
return _add_strings(context, reversed(list(iterate_nodes())))
def _add_strings(context, nodes, add_slash=False):
string = ''
first = True
for child_node in nodes:
values = context.infer_node(child_node)
if len(values) != 1:
return None
c, = values
s = get_str_or_none(c)
if s is None:
return None
if not first and add_slash:
string += os.path.sep
string += s
first = False
return string
def _add_os_path_join(module_context, start_leaf, bracket_start):
def check(maybe_bracket, nodes):
if maybe_bracket.start_pos != bracket_start:
return None
if not nodes:
return ''
context = module_context.create_context(nodes[0])
return _add_strings(context, nodes, add_slash=True) or ''
if start_leaf.type == 'error_leaf':
# Unfinished string literal, like `join('`
value_node = start_leaf.parent
index = value_node.children.index(start_leaf)
if index > 0:
error_node = value_node.children[index - 1]
if error_node.type == 'error_node' and len(error_node.children) >= 2:
index = -2
if error_node.children[-1].type == 'arglist':
arglist_nodes = error_node.children[-1].children
index -= 1
else:
arglist_nodes = []
return check(error_node.children[index + 1], arglist_nodes[::2])
return None
# Maybe an arglist or some weird error case. Therefore checked below.
searched_node_child = start_leaf
while searched_node_child.parent is not None \
and searched_node_child.parent.type not in ('arglist', 'trailer', 'error_node'):
searched_node_child = searched_node_child.parent
if searched_node_child.get_first_leaf() is not start_leaf:
return None
searched_node = searched_node_child.parent
if searched_node is None:
return None
index = searched_node.children.index(searched_node_child)
arglist_nodes = searched_node.children[:index]
if searched_node.type == 'arglist':
trailer = searched_node.parent
if trailer.type == 'error_node':
trailer_index = trailer.children.index(searched_node)
assert trailer_index >= 2
assert trailer.children[trailer_index - 1] == '('
return check(trailer.children[trailer_index - 1], arglist_nodes[::2])
elif trailer.type == 'trailer':
return check(trailer.children[0], arglist_nodes[::2])
elif searched_node.type == 'trailer':
return check(searched_node.children[0], [])
elif searched_node.type == 'error_node':
# Stuff like `join(""`
return check(arglist_nodes[-1], [])
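# File-name completion is triggered transparently when completing inside a
# string literal; a small sketch, assuming `import jedi` and a working
# directory that contains entries starting with "se" (e.g. setup.py).
import jedi

source = "with open('./se"
for c in jedi.Script(source, path='example.py').complete(1, len(source)):
    # Directories are completed with a trailing separator, plain files with
    # the closing quote; these completions report type 'path'.
    print(c.name, c.type)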

View File

@ -0,0 +1,522 @@
"""
Helpers for the API
"""
import re
from collections import namedtuple
from textwrap import dedent
from itertools import chain
from functools import wraps
from inspect import Parameter
from parso.python.parser import Parser
from parso.python import tree
from jedi.inference.base_value import NO_VALUES
from jedi.inference.syntax_tree import infer_atom
from jedi.inference.helpers import infer_call_of_leaf
from jedi.inference.compiled import get_string_value_set
from jedi.cache import signature_time_cache, memoize_method
from jedi.parser_utils import get_parent_scope
CompletionParts = namedtuple('CompletionParts', ['path', 'has_dot', 'name'])
def _start_match(string, like_name):
return string.startswith(like_name)
def _fuzzy_match(string, like_name):
if len(like_name) <= 1:
return like_name in string
pos = string.find(like_name[0])
if pos >= 0:
return _fuzzy_match(string[pos + 1:], like_name[1:])
return False
def match(string, like_name, fuzzy=False):
if fuzzy:
return _fuzzy_match(string, like_name)
else:
return _start_match(string, like_name)
def sorted_definitions(defs):
    # Note: `or ''` below is required because `module_path` could be None.
return sorted(defs, key=lambda x: (str(x.module_path or ''),
x.line or 0,
x.column or 0,
x.name))
def get_on_completion_name(module_node, lines, position):
leaf = module_node.get_leaf_for_position(position)
if leaf is None or leaf.type in ('string', 'error_leaf'):
# Completions inside strings are a bit special, we need to parse the
# string. The same is true for comments and error_leafs.
line = lines[position[0] - 1]
# The first step of completions is to get the name
return re.search(r'(?!\d)\w+$|$', line[:position[1]]).group(0)
elif leaf.type not in ('name', 'keyword'):
return ''
return leaf.value[:position[1] - leaf.start_pos[1]]
def _get_code(code_lines, start_pos, end_pos):
# Get relevant lines.
lines = code_lines[start_pos[0] - 1:end_pos[0]]
# Remove the parts at the end of the line.
lines[-1] = lines[-1][:end_pos[1]]
# Remove first line indentation.
lines[0] = lines[0][start_pos[1]:]
return ''.join(lines)
class OnErrorLeaf(Exception):
@property
def error_leaf(self):
return self.args[0]
def _get_code_for_stack(code_lines, leaf, position):
# It might happen that we're on whitespace or on a comment. This means
# that we would not get the right leaf.
if leaf.start_pos >= position:
# If we're not on a comment simply get the previous leaf and proceed.
leaf = leaf.get_previous_leaf()
if leaf is None:
return '' # At the beginning of the file.
is_after_newline = leaf.type == 'newline'
while leaf.type == 'newline':
leaf = leaf.get_previous_leaf()
if leaf is None:
return ''
if leaf.type == 'error_leaf' or leaf.type == 'string':
if leaf.start_pos[0] < position[0]:
# On a different line, we just begin anew.
return ''
# Error leafs cannot be parsed, completion in strings is also
# impossible.
raise OnErrorLeaf(leaf)
else:
user_stmt = leaf
while True:
if user_stmt.parent.type in ('file_input', 'suite', 'simple_stmt'):
break
user_stmt = user_stmt.parent
if is_after_newline:
if user_stmt.start_pos[1] > position[1]:
# This means that it's actually a dedent and that means that we
# start without value (part of a suite).
return ''
# This is basically getting the relevant lines.
return _get_code(code_lines, user_stmt.get_start_pos_of_prefix(), position)
def get_stack_at_position(grammar, code_lines, leaf, pos):
"""
Returns the possible node names (e.g. import_from, xor_test or yield_stmt).
"""
class EndMarkerReached(Exception):
pass
def tokenize_without_endmarker(code):
# TODO This is for now not an official parso API that exists purely
# for Jedi.
tokens = grammar._tokenize(code)
for token in tokens:
if token.string == safeword:
raise EndMarkerReached()
elif token.prefix.endswith(safeword):
# This happens with comments.
raise EndMarkerReached()
elif token.string.endswith(safeword):
yield token # Probably an f-string literal that was not finished.
raise EndMarkerReached()
else:
yield token
    # The code might be indented; just remove the indentation.
code = dedent(_get_code_for_stack(code_lines, leaf, pos))
# We use a word to tell Jedi when we have reached the start of the
# completion.
# Use Z as a prefix because it's not part of a number suffix.
safeword = 'ZZZ_USER_WANTS_TO_COMPLETE_HERE_WITH_JEDI'
code = code + ' ' + safeword
p = Parser(grammar._pgen_grammar, error_recovery=True)
try:
p.parse(tokens=tokenize_without_endmarker(code))
except EndMarkerReached:
return p.stack
raise SystemError(
"This really shouldn't happen. There's a bug in Jedi:\n%s"
% list(tokenize_without_endmarker(code))
)
def infer(inference_state, context, leaf):
if leaf.type == 'name':
return inference_state.infer(context, leaf)
parent = leaf.parent
definitions = NO_VALUES
if parent.type == 'atom':
# e.g. `(a + b)`
definitions = context.infer_node(leaf.parent)
elif parent.type == 'trailer':
# e.g. `a()`
definitions = infer_call_of_leaf(context, leaf)
elif isinstance(leaf, tree.Literal):
# e.g. `"foo"` or `1.0`
return infer_atom(context, leaf)
elif leaf.type in ('fstring_string', 'fstring_start', 'fstring_end'):
return get_string_value_set(inference_state)
return definitions
def filter_follow_imports(names, follow_builtin_imports=False):
for name in names:
if name.is_import():
new_names = list(filter_follow_imports(
name.goto(),
follow_builtin_imports=follow_builtin_imports,
))
found_builtin = False
if follow_builtin_imports:
for new_name in new_names:
if new_name.start_pos is None:
found_builtin = True
if found_builtin:
yield name
else:
yield from new_names
else:
yield name
class CallDetails:
def __init__(self, bracket_leaf, children, position):
self.bracket_leaf = bracket_leaf
self._children = children
self._position = position
@property
def index(self):
return _get_index_and_key(self._children, self._position)[0]
@property
def keyword_name_str(self):
return _get_index_and_key(self._children, self._position)[1]
@memoize_method
def _list_arguments(self):
return list(_iter_arguments(self._children, self._position))
def calculate_index(self, param_names):
positional_count = 0
used_names = set()
star_count = -1
args = self._list_arguments()
if not args:
if param_names:
return 0
else:
return None
is_kwarg = False
for i, (star_count, key_start, had_equal) in enumerate(args):
is_kwarg |= had_equal | (star_count == 2)
if star_count:
pass # For now do nothing, we don't know what's in there here.
else:
if i + 1 != len(args): # Not last
if had_equal:
used_names.add(key_start)
else:
positional_count += 1
for i, param_name in enumerate(param_names):
kind = param_name.get_kind()
if not is_kwarg:
if kind == Parameter.VAR_POSITIONAL:
return i
if kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.POSITIONAL_ONLY):
if i == positional_count:
return i
if key_start is not None and not star_count == 1 or star_count == 2:
if param_name.string_name not in used_names \
and (kind == Parameter.KEYWORD_ONLY
or kind == Parameter.POSITIONAL_OR_KEYWORD
and positional_count <= i):
if star_count:
return i
if had_equal:
if param_name.string_name == key_start:
return i
else:
if param_name.string_name.startswith(key_start):
return i
if kind == Parameter.VAR_KEYWORD:
return i
return None
def iter_used_keyword_arguments(self):
for star_count, key_start, had_equal in list(self._list_arguments()):
if had_equal and key_start:
yield key_start
def count_positional_arguments(self):
count = 0
for star_count, key_start, had_equal in self._list_arguments()[:-1]:
if star_count:
break
count += 1
return count
def _iter_arguments(nodes, position):
def remove_after_pos(name):
if name.type != 'name':
return None
return name.value[:position[1] - name.start_pos[1]]
# Returns Generator[Tuple[star_count, Optional[key_start: str], had_equal]]
nodes_before = [c for c in nodes if c.start_pos < position]
if nodes_before[-1].type == 'arglist':
yield from _iter_arguments(nodes_before[-1].children, position)
return
previous_node_yielded = False
stars_seen = 0
for i, node in enumerate(nodes_before):
if node.type == 'argument':
previous_node_yielded = True
first = node.children[0]
second = node.children[1]
if second == '=':
if second.start_pos < position:
yield 0, first.value, True
else:
yield 0, remove_after_pos(first), False
elif first in ('*', '**'):
yield len(first.value), remove_after_pos(second), False
else:
# Must be a Comprehension
first_leaf = node.get_first_leaf()
if first_leaf.type == 'name' and first_leaf.start_pos >= position:
yield 0, remove_after_pos(first_leaf), False
else:
yield 0, None, False
stars_seen = 0
elif node.type == 'testlist_star_expr':
for n in node.children[::2]:
if n.type == 'star_expr':
stars_seen = 1
n = n.children[1]
yield stars_seen, remove_after_pos(n), False
stars_seen = 0
# The count of children is even if there's a comma at the end.
previous_node_yielded = bool(len(node.children) % 2)
elif isinstance(node, tree.PythonLeaf) and node.value == ',':
if not previous_node_yielded:
yield stars_seen, '', False
stars_seen = 0
previous_node_yielded = False
elif isinstance(node, tree.PythonLeaf) and node.value in ('*', '**'):
stars_seen = len(node.value)
elif node == '=' and nodes_before[-1]:
previous_node_yielded = True
before = nodes_before[i - 1]
if before.type == 'name':
yield 0, before.value, True
else:
yield 0, None, False
# Just ignore the star that is probably a syntax error.
stars_seen = 0
if not previous_node_yielded:
if nodes_before[-1].type == 'name':
yield stars_seen, remove_after_pos(nodes_before[-1]), False
else:
yield stars_seen, '', False
def _get_index_and_key(nodes, position):
"""
Returns the amount of commas and the keyword argument string.
"""
nodes_before = [c for c in nodes if c.start_pos < position]
if nodes_before[-1].type == 'arglist':
return _get_index_and_key(nodes_before[-1].children, position)
key_str = None
last = nodes_before[-1]
if last.type == 'argument' and last.children[1] == '=' \
and last.children[1].end_pos <= position:
        # The cursor is past the `=` of a keyword argument, so take its name.
key_str = last.children[0].value
elif last == '=':
key_str = nodes_before[-2].value
return nodes_before.count(','), key_str
def _get_signature_details_from_error_node(node, additional_children, position):
for index, element in reversed(list(enumerate(node.children))):
# `index > 0` means that it's a trailer and not an atom.
if element == '(' and element.end_pos <= position and index > 0:
# It's an error node, we don't want to match too much, just
# until the parentheses is enough.
children = node.children[index:]
name = element.get_previous_leaf()
if name is None:
continue
if name.type == 'name' or name.parent.type in ('trailer', 'atom'):
return CallDetails(element, children + additional_children, position)
def get_signature_details(module, position):
leaf = module.get_leaf_for_position(position, include_prefixes=True)
# It's easier to deal with the previous token than the next one in this
# case.
if leaf.start_pos >= position:
# Whitespace / comments after the leaf count towards the previous leaf.
leaf = leaf.get_previous_leaf()
if leaf is None:
return None
# Now that we know where we are in the syntax tree, we start to look at
# parents for possible function definitions.
node = leaf.parent
while node is not None:
if node.type in ('funcdef', 'classdef', 'decorated', 'async_stmt'):
# Don't show signatures if there's stuff before it that just
# makes it feel strange to have a signature.
return None
additional_children = []
for n in reversed(node.children):
if n.start_pos < position:
if n.type == 'error_node':
result = _get_signature_details_from_error_node(
n, additional_children, position
)
if result is not None:
return result
additional_children[0:0] = n.children
continue
additional_children.insert(0, n)
# Find a valid trailer
if node.type == 'trailer' and node.children[0] == '(' \
or node.type == 'decorator' and node.children[2] == '(':
# Additionally we have to check that an ending parenthesis isn't
# interpreted wrong. There are two cases:
# 1. Cursor before paren -> The current signature is good
# 2. Cursor after paren -> We need to skip the current signature
if not (leaf is node.children[-1] and position >= leaf.end_pos):
leaf = node.get_previous_leaf()
if leaf is None:
return None
return CallDetails(
node.children[0] if node.type == 'trailer' else node.children[2],
node.children,
position
)
node = node.parent
return None
@signature_time_cache("call_signatures_validity")
def cache_signatures(inference_state, context, bracket_leaf, code_lines, user_pos):
"""This function calculates the cache key."""
line_index = user_pos[0] - 1
before_cursor = code_lines[line_index][:user_pos[1]]
other_lines = code_lines[bracket_leaf.start_pos[0]:line_index]
whole = ''.join(other_lines + [before_cursor])
before_bracket = re.match(r'.*\(', whole, re.DOTALL)
module_path = context.get_root_context().py__file__()
if module_path is None:
yield None # Don't cache!
else:
yield (module_path, before_bracket, bracket_leaf.start_pos)
yield infer(
inference_state,
context,
bracket_leaf.get_previous_leaf(),
)
def validate_line_column(func):
@wraps(func)
def wrapper(self, line=None, column=None, *args, **kwargs):
line = max(len(self._code_lines), 1) if line is None else line
if not (0 < line <= len(self._code_lines)):
raise ValueError('`line` parameter is not in a valid range.')
line_string = self._code_lines[line - 1]
line_len = len(line_string)
if line_string.endswith('\r\n'):
line_len -= 2
elif line_string.endswith('\n'):
line_len -= 1
column = line_len if column is None else column
if not (0 <= column <= line_len):
raise ValueError('`column` parameter (%d) is not in a valid range '
'(0-%d) for line %d (%r).' % (
column, line_len, line, line_string))
return func(self, line, column, *args, **kwargs)
return wrapper
def get_module_names(module, all_scopes, definitions=True, references=False):
"""
Returns a dictionary with name parts as keys and their call paths as
values.
"""
def def_ref_filter(name):
is_def = name.is_definition()
return definitions and is_def or references and not is_def
names = list(chain.from_iterable(module.get_used_names().values()))
if not all_scopes:
# We have to filter all the names that don't have the module as a
# parent_scope. There's None as a parent, because nodes in the module
# node have the parent module and not suite as all the others.
# Therefore it's important to catch that case.
def is_module_scope_name(name):
parent_scope = get_parent_scope(name)
# async functions have an extra wrapper. Strip it.
if parent_scope and parent_scope.type == 'async_stmt':
parent_scope = parent_scope.parent
return parent_scope in (module, None)
names = [n for n in names if is_module_scope_name(n)]
return filter(def_ref_filter, names)
def split_search_string(name):
type, _, dotted_names = name.rpartition(' ')
if type == 'def':
type = 'function'
return type, dotted_names.split('.')
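# Most of these helpers feed two public calls: match()/_fuzzy_match() back
# Script.complete(fuzzy=True), and CallDetails/get_signature_details() back
# Script.get_signatures(). A brief sketch, assuming `import jedi`.
import jedi

source = 'import collections\ncollections.odi'
for c in jedi.Script(source).complete(2, len('collections.odi'), fuzzy=True):
    print(c.name)        # e.g. OrderedDict survives the fuzzy filter

source = 'sorted([3, 1, 2], key='
for signature in jedi.Script(source).get_signatures(1, len(source)):
    # index is the argument currently being written (here: `key`).
    print(signature.name, signature.index, [p.name for p in signature.params])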

View File

@ -0,0 +1,74 @@
"""
TODO Some parts of this module are still not well documented.
"""
from jedi.inference import compiled
from jedi.inference.base_value import ValueSet
from jedi.inference.filters import ParserTreeFilter, MergedFilter
from jedi.inference.names import TreeNameDefinition
from jedi.inference.compiled import mixed
from jedi.inference.compiled.access import create_access_path
from jedi.inference.context import ModuleContext
def _create(inference_state, obj):
return compiled.create_from_access_path(
inference_state, create_access_path(inference_state, obj)
)
class NamespaceObject:
def __init__(self, dct):
self.__dict__ = dct
class MixedTreeName(TreeNameDefinition):
def infer(self):
"""
        In an IPython notebook it is typical that some parts of the provided
        code were already executed. In that case, if something cannot be
        properly inferred from the source, it should still be inferred from
        the variables that are already known from execution.
"""
inferred = super().infer()
if not inferred:
for compiled_value in self.parent_context.mixed_values:
for f in compiled_value.get_filters():
values = ValueSet.from_sets(
n.infer() for n in f.get(self.string_name)
)
if values:
return values
return inferred
class MixedParserTreeFilter(ParserTreeFilter):
name_class = MixedTreeName
class MixedModuleContext(ModuleContext):
def __init__(self, tree_module_value, namespaces):
super().__init__(tree_module_value)
self.mixed_values = [
self._get_mixed_object(
_create(self.inference_state, NamespaceObject(n))
) for n in namespaces
]
def _get_mixed_object(self, compiled_value):
return mixed.MixedObject(
compiled_value=compiled_value,
tree_value=self._value
)
def get_filters(self, until_position=None, origin_scope=None):
yield MergedFilter(
MixedParserTreeFilter(
parent_context=self,
until_position=until_position,
origin_scope=origin_scope
),
self.get_global_filter(),
)
for mixed_object in self.mixed_values:
yield from mixed_object.get_filters(until_position, origin_scope)
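# This module backs jedi.Interpreter, which mixes live objects (e.g. a REPL
# namespace) with static analysis; a minimal sketch, assuming `import jedi`.
import jedi

namespace = {'rows': [{'id': 1, 'name': 'a'}]}   # as it might exist in a REPL

# MixedModuleContext lets inference fall back to the real objects above
# whenever the source code alone is not enough.
source = 'rows[0].'
for c in jedi.Interpreter(source, [namespace]).complete(1, len(source)):
    print(c.name)        # dict methods: items, keys, values, ...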

View File

@ -0,0 +1,52 @@
import pydoc
from contextlib import suppress
from typing import Dict, Optional
from jedi.inference.names import AbstractArbitraryName
try:
# https://github.com/python/typeshed/pull/4351 adds pydoc_data
from pydoc_data import topics # type: ignore[import]
pydoc_topics: Optional[Dict[str, str]] = topics.topics
except ImportError:
# Python 3.6.8 embeddable does not have pydoc_data.
pydoc_topics = None
class KeywordName(AbstractArbitraryName):
api_type = 'keyword'
def py__doc__(self):
return imitate_pydoc(self.string_name)
def imitate_pydoc(string):
"""
    It's not possible to get pydoc's help texts without starting the annoying
    pager stuff.
"""
if pydoc_topics is None:
return ''
h = pydoc.help
with suppress(KeyError):
# try to access symbols
string = h.symbols[string]
string, _, related = string.partition(' ')
def get_target(s):
return h.topics.get(s, h.keywords.get(s))
while isinstance(string, str):
string = get_target(string)
try:
# is a tuple now
label, related = string
except TypeError:
return ''
try:
return pydoc_topics[label].strip() if pydoc_topics else ''
except KeyError:
return ''
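# A small sketch of what imitate_pydoc() yields (it assumes the function above
# is in scope and that pydoc_data is available); keyword completions expose
# this text through Completion.docstring().
print(imitate_pydoc('if')[:200])        # start of the pydoc topic for `if`
print(imitate_pydoc('not_a_keyword'))   # unknown names simply yield ''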

View File

@ -0,0 +1,450 @@
"""
Projects are a way to handle Python projects within Jedi. For simpler plugins
you might not want to deal with projects, but if you want to give the user more
flexibility to define sys paths and Python interpreters for a project,
:class:`.Project` is the perfect way to allow for that.
Projects can be saved to disk and loaded again, to allow project definitions to
be used across repositories.
"""
import json
from pathlib import Path
from itertools import chain
from jedi import debug
from jedi.api.environment import get_cached_default_environment, create_environment
from jedi.api.exceptions import WrongVersion
from jedi.api.completion import search_in_module
from jedi.api.helpers import split_search_string, get_module_names
from jedi.inference.imports import load_module_from_path, \
load_namespace_from_path, iter_module_names
from jedi.inference.sys_path import discover_buildout_paths
from jedi.inference.cache import inference_state_as_method_param_cache
from jedi.inference.references import recurse_find_python_folders_and_files, search_in_file_ios
from jedi.file_io import FolderIO
_CONFIG_FOLDER = '.jedi'
_CONTAINS_POTENTIAL_PROJECT = \
'setup.py', '.git', '.hg', 'requirements.txt', 'MANIFEST.in', 'pyproject.toml'
_SERIALIZER_VERSION = 1
def _try_to_skip_duplicates(func):
def wrapper(*args, **kwargs):
found_tree_nodes = []
found_modules = []
for definition in func(*args, **kwargs):
tree_node = definition._name.tree_name
if tree_node is not None and tree_node in found_tree_nodes:
continue
if definition.type == 'module' and definition.module_path is not None:
if definition.module_path in found_modules:
continue
found_modules.append(definition.module_path)
yield definition
found_tree_nodes.append(tree_node)
return wrapper
def _remove_duplicates_from_path(path):
used = set()
for p in path:
if p in used:
continue
used.add(p)
yield p
class Project:
"""
Projects are a simple way to manage Python folders and define how Jedi does
import resolution. It is mostly used as a parameter to :class:`.Script`.
Additionally there are functions to search a whole project.
"""
_environment = None
@staticmethod
def _get_config_folder_path(base_path):
return base_path.joinpath(_CONFIG_FOLDER)
@staticmethod
def _get_json_path(base_path):
return Project._get_config_folder_path(base_path).joinpath('project.json')
@classmethod
def load(cls, path):
"""
Loads a project from a specific path. You should not provide the path
to ``.jedi/project.json``, but rather the path to the project folder.
:param path: The path of the directory you want to use as a project.
"""
if isinstance(path, str):
path = Path(path)
with open(cls._get_json_path(path)) as f:
version, data = json.load(f)
if version == 1:
return cls(**data)
else:
raise WrongVersion(
"The Jedi version of this project seems newer than what we can handle."
)
def save(self):
"""
Saves the project configuration in the project in ``.jedi/project.json``.
"""
data = dict(self.__dict__)
data.pop('_environment', None)
data.pop('_django', None) # TODO make django setting public?
data = {k.lstrip('_'): v for k, v in data.items()}
data['path'] = str(data['path'])
self._get_config_folder_path(self._path).mkdir(parents=True, exist_ok=True)
with open(self._get_json_path(self._path), 'w') as f:
return json.dump((_SERIALIZER_VERSION, data), f)
def __init__(
self,
path,
*,
environment_path=None,
load_unsafe_extensions=False,
sys_path=None,
added_sys_path=(),
smart_sys_path=True,
) -> None:
"""
:param path: The base path for this project.
:param environment_path: The Python executable path, typically the path
of a virtual environment.
:param load_unsafe_extensions: Default False, Loads extensions that are not in the
sys path and in the local directories. With this option enabled,
this is potentially unsafe if you clone a git repository and
            analyze its code, because those compiled extensions will be
            imported and therefore have execution privileges.
:param sys_path: list of str. You can override the sys path if you
want. By default the ``sys.path.`` is generated by the
environment (virtualenvs, etc).
:param added_sys_path: list of str. Adds these paths at the end of the
sys path.
:param smart_sys_path: If this is enabled (default), adds paths from
local directories. Otherwise you will have to rely on your packages
being properly configured on the ``sys.path``.
"""
if isinstance(path, str):
path = Path(path).absolute()
self._path = path
self._environment_path = environment_path
if sys_path is not None:
# Remap potential pathlib.Path entries
sys_path = list(map(str, sys_path))
self._sys_path = sys_path
self._smart_sys_path = smart_sys_path
self._load_unsafe_extensions = load_unsafe_extensions
self._django = False
# Remap potential pathlib.Path entries
self.added_sys_path = list(map(str, added_sys_path))
"""The sys path that is going to be added at the end of the """
@property
def path(self):
"""
The base path for this project.
"""
return self._path
@property
def sys_path(self):
"""
The sys path provided to this project. This can be None and in that
case will be auto generated.
"""
return self._sys_path
@property
def smart_sys_path(self):
"""
If the sys path is going to be calculated in a smart way, where
additional paths are added.
"""
return self._smart_sys_path
@property
def load_unsafe_extensions(self):
"""
        Whether the project loads unsafe extensions.
"""
return self._load_unsafe_extensions
@inference_state_as_method_param_cache()
def _get_base_sys_path(self, inference_state):
# The sys path has not been set explicitly.
sys_path = list(inference_state.environment.get_sys_path())
try:
sys_path.remove('')
except ValueError:
pass
return sys_path
@inference_state_as_method_param_cache()
def _get_sys_path(self, inference_state, add_parent_paths=True, add_init_paths=False):
"""
Keep this method private for all users of jedi. However internally this
one is used like a public method.
"""
suffixed = list(self.added_sys_path)
prefixed = []
if self._sys_path is None:
sys_path = list(self._get_base_sys_path(inference_state))
else:
sys_path = list(self._sys_path)
if self._smart_sys_path:
prefixed.append(str(self._path))
if inference_state.script_path is not None:
suffixed += map(str, discover_buildout_paths(
inference_state,
inference_state.script_path
))
if add_parent_paths:
# Collect directories in upward search by:
# 1. Skipping directories with __init__.py
# 2. Stopping immediately when above self._path
traversed = []
for parent_path in inference_state.script_path.parents:
if parent_path == self._path \
or self._path not in parent_path.parents:
break
if not add_init_paths \
and parent_path.joinpath("__init__.py").is_file():
continue
traversed.append(str(parent_path))
# AFAIK some libraries have imports like `foo.foo.bar`, which
                    # leads to the conclusion to prefer longer paths over
                    # shorter ones by default.
suffixed += reversed(traversed)
if self._django:
prefixed.append(str(self._path))
path = prefixed + sys_path + suffixed
return list(_remove_duplicates_from_path(path))
def get_environment(self):
if self._environment is None:
if self._environment_path is not None:
self._environment = create_environment(self._environment_path, safe=False)
else:
self._environment = get_cached_default_environment()
return self._environment
def search(self, string, *, all_scopes=False):
"""
        Searches for a name in the whole project. If the project is very big,
at some point Jedi will stop searching. However it's also very much
recommended to not exhaust the generator. Just display the first ten
results to the user.
There are currently three different search patterns:
- ``foo`` to search for a definition foo in any file or a file called
``foo.py`` or ``foo.pyi``.
- ``foo.bar`` to search for the ``foo`` and then an attribute ``bar``
in it.
- ``class foo.bar.Bar`` or ``def foo.bar.baz`` to search for a specific
API type.
:param bool all_scopes: Default False; searches not only for
            definitions at the top level of a module, but also in
functions and classes.
:yields: :class:`.Name`
"""
return self._search_func(string, all_scopes=all_scopes)
def complete_search(self, string, **kwargs):
"""
Like :meth:`.Script.search`, but completes that string. An empty string
lists all definitions in a project, so be careful with that.
:param bool all_scopes: Default False; searches not only for
            definitions at the top level of a module, but also in
functions and classes.
:yields: :class:`.Completion`
"""
return self._search_func(string, complete=True, **kwargs)
@_try_to_skip_duplicates
def _search_func(self, string, complete=False, all_scopes=False):
        # Using a Script is the easiest way to get an empty module context.
from jedi import Script
s = Script('', project=self)
inference_state = s._inference_state
empty_module_context = s._get_module_context()
debug.dbg('Search for string %s, complete=%s', string, complete)
wanted_type, wanted_names = split_search_string(string)
name = wanted_names[0]
stub_folder_name = name + '-stubs'
ios = recurse_find_python_folders_and_files(FolderIO(str(self._path)))
file_ios = []
# 1. Search for modules in the current project
for folder_io, file_io in ios:
if file_io is None:
file_name = folder_io.get_base_name()
if file_name == name or file_name == stub_folder_name:
f = folder_io.get_file_io('__init__.py')
try:
m = load_module_from_path(inference_state, f).as_context()
except FileNotFoundError:
f = folder_io.get_file_io('__init__.pyi')
try:
m = load_module_from_path(inference_state, f).as_context()
except FileNotFoundError:
m = load_namespace_from_path(inference_state, folder_io).as_context()
else:
continue
else:
file_ios.append(file_io)
if Path(file_io.path).name in (name + '.py', name + '.pyi'):
m = load_module_from_path(inference_state, file_io).as_context()
else:
continue
debug.dbg('Search of a specific module %s', m)
yield from search_in_module(
inference_state,
m,
names=[m.name],
wanted_type=wanted_type,
wanted_names=wanted_names,
complete=complete,
convert=True,
ignore_imports=True,
)
# 2. Search for identifiers in the project.
for module_context in search_in_file_ios(inference_state, file_ios,
name, complete=complete):
names = get_module_names(module_context.tree_node, all_scopes=all_scopes)
names = [module_context.create_name(n) for n in names]
names = _remove_imports(names)
yield from search_in_module(
inference_state,
module_context,
names=names,
wanted_type=wanted_type,
wanted_names=wanted_names,
complete=complete,
ignore_imports=True,
)
# 3. Search for modules on sys.path
sys_path = [
p for p in self._get_sys_path(inference_state)
# Exclude folders that are handled by recursing of the Python
# folders.
if not p.startswith(str(self._path))
]
names = list(iter_module_names(inference_state, empty_module_context, sys_path))
yield from search_in_module(
inference_state,
empty_module_context,
names=names,
wanted_type=wanted_type,
wanted_names=wanted_names,
complete=complete,
convert=True,
)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._path)
def _is_potential_project(path):
for name in _CONTAINS_POTENTIAL_PROJECT:
try:
if path.joinpath(name).exists():
return True
except OSError:
continue
return False
def _is_django_path(directory):
""" Detects the path of the very well known Django library (if used) """
try:
with open(directory.joinpath('manage.py'), 'rb') as f:
return b"DJANGO_SETTINGS_MODULE" in f.read()
except (FileNotFoundError, IsADirectoryError, PermissionError):
return False
def get_default_project(path=None):
"""
If a project is not defined by the user, Jedi tries to define a project by
itself as well as possible. Jedi traverses folders until it finds one of
the following:
    1. A ``.jedi/project.json``
    2. One of the following files: ``setup.py``, ``.git``, ``.hg``,
       ``requirements.txt``, ``MANIFEST.in`` or ``pyproject.toml``.
"""
if path is None:
path = Path.cwd()
elif isinstance(path, str):
path = Path(path)
check = path.absolute()
probable_path = None
first_no_init_file = None
for dir in chain([check], check.parents):
try:
return Project.load(dir)
except (FileNotFoundError, IsADirectoryError, PermissionError):
pass
except NotADirectoryError:
continue
if first_no_init_file is None:
if dir.joinpath('__init__.py').exists():
# In the case that a __init__.py exists, it's in 99% just a
# Python package and the project sits at least one level above.
continue
elif not dir.is_file():
first_no_init_file = dir
if _is_django_path(dir):
project = Project(dir)
project._django = True
return project
if probable_path is None and _is_potential_project(dir):
probable_path = dir
if probable_path is not None:
# TODO search for setup.py etc
return Project(probable_path)
if first_no_init_file is not None:
return Project(first_no_init_file)
curdir = path if path.is_dir() else path.parent
return Project(curdir)
def _remove_imports(names):
return [
n for n in names
if n.tree_name is None or n.api_type not in ('module', 'namespace')
]
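# A condensed sketch of the project API above, assuming `import jedi`; all
# paths below are placeholders.
import jedi

project = jedi.Project(
    '/tmp/myproject',                                     # placeholder path
    environment_path='/tmp/myproject/.venv/bin/python',   # placeholder venv
    added_sys_path=['/tmp/myproject/src'],
)
project.save()                                  # writes .jedi/project.json
project = jedi.Project.load('/tmp/myproject')

# Or let Jedi guess by walking upwards until it sees setup.py, .git, etc.
project = jedi.get_default_project('/tmp/myproject/pkg/module.py')

for name in project.search('class Project', all_scopes=True):
    print(name.module_path, name.line, name.name)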

View File

@ -0,0 +1,242 @@
import difflib
from pathlib import Path
from typing import Dict, Iterable, Tuple
from parso import split_lines
from jedi.api.exceptions import RefactoringError
EXPRESSION_PARTS = (
'or_test and_test not_test comparison '
'expr xor_expr and_expr shift_expr arith_expr term factor power atom_expr'
).split()
class ChangedFile:
def __init__(self, inference_state, from_path, to_path,
module_node, node_to_str_map):
self._inference_state = inference_state
self._from_path = from_path
self._to_path = to_path
self._module_node = module_node
self._node_to_str_map = node_to_str_map
def get_diff(self):
old_lines = split_lines(self._module_node.get_code(), keepends=True)
new_lines = split_lines(self.get_new_code(), keepends=True)
# Add a newline at the end if it's missing. Otherwise the diff will be
# very weird. A `diff -u file1 file2` would show the string:
#
# \ No newline at end of file
#
# This is not necessary IMO, because Jedi does not really play with
# newlines and the ending newline does not really matter in Python
# files. ~dave
if old_lines[-1] != '':
old_lines[-1] += '\n'
if new_lines[-1] != '':
new_lines[-1] += '\n'
project_path = self._inference_state.project.path
if self._from_path is None:
from_p = ''
else:
from_p = self._from_path.relative_to(project_path)
if self._to_path is None:
to_p = ''
else:
to_p = self._to_path.relative_to(project_path)
diff = difflib.unified_diff(
old_lines, new_lines,
fromfile=str(from_p),
tofile=str(to_p),
)
# Apparently there's a space at the end of the diff - for whatever
# reason.
return ''.join(diff).rstrip(' ')
def get_new_code(self):
return self._inference_state.grammar.refactor(self._module_node, self._node_to_str_map)
def apply(self):
if self._from_path is None:
raise RefactoringError(
'Cannot apply a refactoring on a Script with path=None'
)
with open(self._from_path, 'w', newline='') as f:
f.write(self.get_new_code())
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._from_path)
class Refactoring:
def __init__(self, inference_state, file_to_node_changes, renames=()):
self._inference_state = inference_state
self._renames = renames
self._file_to_node_changes = file_to_node_changes
def get_changed_files(self) -> Dict[Path, ChangedFile]:
def calculate_to_path(p):
if p is None:
return p
p = str(p)
for from_, to in renames:
if p.startswith(str(from_)):
p = str(to) + p[len(str(from_)):]
return Path(p)
renames = self.get_renames()
return {
path: ChangedFile(
self._inference_state,
from_path=path,
to_path=calculate_to_path(path),
module_node=next(iter(map_)).get_root_node(),
node_to_str_map=map_
) for path, map_ in sorted(self._file_to_node_changes.items())
}
def get_renames(self) -> Iterable[Tuple[Path, Path]]:
"""
Files can be renamed in a refactoring.
"""
return sorted(self._renames)
def get_diff(self):
text = ''
project_path = self._inference_state.project.path
for from_, to in self.get_renames():
text += 'rename from %s\nrename to %s\n' \
% (from_.relative_to(project_path), to.relative_to(project_path))
return text + ''.join(f.get_diff() for f in self.get_changed_files().values())
def apply(self):
"""
Applies the whole refactoring to the files, which includes renames.
"""
for f in self.get_changed_files().values():
f.apply()
for old, new in self.get_renames():
old.rename(new)
def _calculate_rename(path, new_name):
dir_ = path.parent
if path.name in ('__init__.py', '__init__.pyi'):
return dir_, dir_.parent.joinpath(new_name)
return path, dir_.joinpath(new_name + path.suffix)
def rename(inference_state, definitions, new_name):
file_renames = set()
file_tree_name_map = {}
if not definitions:
raise RefactoringError("There is no name under the cursor")
for d in definitions:
tree_name = d._name.tree_name
if d.type == 'module' and tree_name is None:
p = None if d.module_path is None else Path(d.module_path)
file_renames.add(_calculate_rename(p, new_name))
else:
# This private access is ok in a way. It's not public to
# protect Jedi users from seeing it.
if tree_name is not None:
fmap = file_tree_name_map.setdefault(d.module_path, {})
fmap[tree_name] = tree_name.prefix + new_name
return Refactoring(inference_state, file_tree_name_map, file_renames)
def inline(inference_state, names):
if not names:
raise RefactoringError("There is no name under the cursor")
if any(n.api_type in ('module', 'namespace') for n in names):
raise RefactoringError("Cannot inline imports, modules or namespaces")
if any(n.tree_name is None for n in names):
raise RefactoringError("Cannot inline builtins/extensions")
definitions = [n for n in names if n.tree_name.is_definition()]
if len(definitions) == 0:
raise RefactoringError("No definition found to inline")
if len(definitions) > 1:
raise RefactoringError("Cannot inline a name with multiple definitions")
if len(names) == 1:
raise RefactoringError("There are no references to this name")
tree_name = definitions[0].tree_name
expr_stmt = tree_name.get_definition()
if expr_stmt.type != 'expr_stmt':
type_ = dict(
funcdef='function',
classdef='class',
).get(expr_stmt.type, expr_stmt.type)
raise RefactoringError("Cannot inline a %s" % type_)
if len(expr_stmt.get_defined_names(include_setitem=True)) > 1:
raise RefactoringError("Cannot inline a statement with multiple definitions")
first_child = expr_stmt.children[1]
if first_child.type == 'annassign' and len(first_child.children) == 4:
first_child = first_child.children[2]
if first_child != '=':
if first_child.type == 'annassign':
raise RefactoringError(
'Cannot inline a statement that is defined by an annotation'
)
else:
raise RefactoringError(
'Cannot inline a statement with "%s"'
% first_child.get_code(include_prefix=False)
)
rhs = expr_stmt.get_rhs()
replace_code = rhs.get_code(include_prefix=False)
references = [n for n in names if not n.tree_name.is_definition()]
file_to_node_changes = {}
for name in references:
tree_name = name.tree_name
path = name.get_root_context().py__file__()
s = replace_code
if rhs.type == 'testlist_star_expr' \
or tree_name.parent.type in EXPRESSION_PARTS \
or tree_name.parent.type == 'trailer' \
and tree_name.parent.get_next_sibling() is not None:
s = '(' + replace_code + ')'
of_path = file_to_node_changes.setdefault(path, {})
n = tree_name
prefix = n.prefix
par = n.parent
if par.type == 'trailer' and par.children[0] == '.':
prefix = par.parent.children[0].prefix
n = par
for some_node in par.parent.children[:par.parent.children.index(par)]:
of_path[some_node] = ''
of_path[n] = prefix + s
path = definitions[0].get_root_context().py__file__()
changes = file_to_node_changes.setdefault(path, {})
changes[expr_stmt] = _remove_indent_of_prefix(expr_stmt.get_first_leaf().prefix)
next_leaf = expr_stmt.get_next_leaf()
# Most of the time we have to remove the newline at the end of the
# statement, but if there's a comment we might not need to.
if next_leaf.prefix.strip(' \t') == '' \
and (next_leaf.type == 'newline' or next_leaf == ';'):
changes[next_leaf] = ''
return Refactoring(inference_state, file_to_node_changes)
def _remove_indent_of_prefix(prefix):
r"""
Removes the last indentation of a prefix, e.g. " \n \n " becomes " \n \n".
"""
return ''.join(split_lines(prefix, keepends=True)[:-1])
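# Illustrative usage sketch, not part of the original module: the rename and
# inline helpers above are normally reached through jedi's public ``Script``
# API. The source string, path and cursor positions below are made up.
if __name__ == '__main__':
    import jedi

    source = "value = 40 + 2\nprint(value)\n"
    script = jedi.Script(source, path='example.py')

    # Rename the variable under the cursor (line 1, column 0).
    print(script.rename(line=1, column=0, new_name='answer').get_diff())

    # Inline the single assignment into its only reference.
    print(script.inline(line=1, column=0).get_diff())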

View File

@ -0,0 +1,386 @@
from textwrap import dedent
from parso import split_lines
from jedi import debug
from jedi.api.exceptions import RefactoringError
from jedi.api.refactoring import Refactoring, EXPRESSION_PARTS
from jedi.common import indent_block
from jedi.parser_utils import function_is_classmethod, function_is_staticmethod
_DEFINITION_SCOPES = ('suite', 'file_input')
_VARIABLE_EXCTRACTABLE = EXPRESSION_PARTS + \
('atom testlist_star_expr testlist test lambdef lambdef_nocond '
'keyword name number string fstring').split()
def extract_variable(inference_state, path, module_node, name, pos, until_pos):
nodes = _find_nodes(module_node, pos, until_pos)
debug.dbg('Extracting nodes: %s', nodes)
is_expression, message = _is_expression_with_error(nodes)
if not is_expression:
raise RefactoringError(message)
generated_code = name + ' = ' + _expression_nodes_to_string(nodes)
file_to_node_changes = {path: _replace(nodes, name, generated_code, pos)}
return Refactoring(inference_state, file_to_node_changes)
def _is_expression_with_error(nodes):
"""
Returns a tuple (is_expression, error_string).
"""
if any(node.type == 'name' and node.is_definition() for node in nodes):
return False, 'Cannot extract a name that defines something'
if nodes[0].type not in _VARIABLE_EXCTRACTABLE:
return False, 'Cannot extract a "%s"' % nodes[0].type
return True, ''
def _find_nodes(module_node, pos, until_pos):
"""
Looks up a module and tries to find the appropriate amount of nodes that
are in there.
"""
start_node = module_node.get_leaf_for_position(pos, include_prefixes=True)
if until_pos is None:
if start_node.type == 'operator':
next_leaf = start_node.get_next_leaf()
if next_leaf is not None and next_leaf.start_pos == pos:
start_node = next_leaf
if _is_not_extractable_syntax(start_node):
start_node = start_node.parent
if start_node.parent.type == 'trailer':
start_node = start_node.parent.parent
while start_node.parent.type in EXPRESSION_PARTS:
start_node = start_node.parent
nodes = [start_node]
else:
# Get the next leaf if we are at the end of a leaf
if start_node.end_pos == pos:
next_leaf = start_node.get_next_leaf()
if next_leaf is not None:
start_node = next_leaf
# Some syntax is not extractable, just use its parent
if _is_not_extractable_syntax(start_node):
start_node = start_node.parent
# Find the end
end_leaf = module_node.get_leaf_for_position(until_pos, include_prefixes=True)
if end_leaf.start_pos > until_pos:
end_leaf = end_leaf.get_previous_leaf()
if end_leaf is None:
raise RefactoringError('Cannot extract anything from that')
parent_node = start_node
while parent_node.end_pos < end_leaf.end_pos:
parent_node = parent_node.parent
nodes = _remove_unwanted_expression_nodes(parent_node, pos, until_pos)
# If the user marks just a return statement, we return the expression
# instead of the whole statement, because the user obviously wants to
# extract that part.
if len(nodes) == 1 and start_node.type in ('return_stmt', 'yield_expr'):
return [nodes[0].children[1]]
return nodes
def _replace(nodes, expression_replacement, extracted, pos,
insert_before_leaf=None, remaining_prefix=None):
# Now try to replace the nodes found with a variable and move the code
# before the current statement.
definition = _get_parent_definition(nodes[0])
if insert_before_leaf is None:
insert_before_leaf = definition.get_first_leaf()
first_node_leaf = nodes[0].get_first_leaf()
lines = split_lines(insert_before_leaf.prefix, keepends=True)
if first_node_leaf is insert_before_leaf:
if remaining_prefix is not None:
# The remaining prefix has already been calculated.
lines[:-1] = remaining_prefix
lines[-1:-1] = [indent_block(extracted, lines[-1]) + '\n']
extracted_prefix = ''.join(lines)
replacement_dct = {}
if first_node_leaf is insert_before_leaf:
replacement_dct[nodes[0]] = extracted_prefix + expression_replacement
else:
if remaining_prefix is None:
p = first_node_leaf.prefix
else:
p = remaining_prefix + _get_indentation(nodes[0])
replacement_dct[nodes[0]] = p + expression_replacement
replacement_dct[insert_before_leaf] = extracted_prefix + insert_before_leaf.value
for node in nodes[1:]:
replacement_dct[node] = ''
return replacement_dct
def _expression_nodes_to_string(nodes):
return ''.join(n.get_code(include_prefix=i != 0) for i, n in enumerate(nodes))
def _suite_nodes_to_string(nodes, pos):
n = nodes[0]
prefix, part_of_code = _split_prefix_at(n.get_first_leaf(), pos[0] - 1)
code = part_of_code + n.get_code(include_prefix=False) \
+ ''.join(n.get_code() for n in nodes[1:])
return prefix, code
def _split_prefix_at(leaf, until_line):
"""
Returns a tuple of the leaf's prefix, split at the until_line
position.
"""
# second means the second returned part
second_line_count = leaf.start_pos[0] - until_line
lines = split_lines(leaf.prefix, keepends=True)
return ''.join(lines[:-second_line_count]), ''.join(lines[-second_line_count:])
def _get_indentation(node):
return split_lines(node.get_first_leaf().prefix)[-1]
def _get_parent_definition(node):
"""
Returns the statement where a node is defined.
"""
while node is not None:
if node.parent.type in _DEFINITION_SCOPES:
return node
node = node.parent
raise NotImplementedError('We should never even get here')
def _remove_unwanted_expression_nodes(parent_node, pos, until_pos):
"""
This function makes it so that for `1 * 2 + 3` you can extract `2 + 3`, even
though that span is not a single node of the expression tree.
"""
typ = parent_node.type
is_suite_part = typ in ('suite', 'file_input')
if typ in EXPRESSION_PARTS or is_suite_part:
nodes = parent_node.children
for i, n in enumerate(nodes):
if n.end_pos > pos:
start_index = i
if n.type == 'operator':
start_index -= 1
break
for i, n in reversed(list(enumerate(nodes))):
if n.start_pos < until_pos:
end_index = i
if n.type == 'operator':
end_index += 1
# Something like `not foo or bar` should not be cut after not
for n2 in nodes[i:]:
if _is_not_extractable_syntax(n2):
end_index += 1
else:
break
break
nodes = nodes[start_index:end_index + 1]
if not is_suite_part:
nodes[0:1] = _remove_unwanted_expression_nodes(nodes[0], pos, until_pos)
nodes[-1:] = _remove_unwanted_expression_nodes(nodes[-1], pos, until_pos)
return nodes
return [parent_node]
def _is_not_extractable_syntax(node):
return node.type == 'operator' \
or node.type == 'keyword' and node.value not in ('None', 'True', 'False')
def extract_function(inference_state, path, module_context, name, pos, until_pos):
nodes = _find_nodes(module_context.tree_node, pos, until_pos)
assert len(nodes)
is_expression, _ = _is_expression_with_error(nodes)
context = module_context.create_context(nodes[0])
is_bound_method = context.is_bound_method()
params, return_variables = list(_find_inputs_and_outputs(module_context, context, nodes))
# Find variables
# Is a class method / method
if context.is_module():
insert_before_leaf = None # Leaf will be determined later
else:
node = _get_code_insertion_node(context.tree_node, is_bound_method)
insert_before_leaf = node.get_first_leaf()
if is_expression:
code_block = 'return ' + _expression_nodes_to_string(nodes) + '\n'
remaining_prefix = None
has_ending_return_stmt = False
else:
has_ending_return_stmt = _is_node_ending_return_stmt(nodes[-1])
if not has_ending_return_stmt:
# Find the actually used variables (of the defined ones). If none are
# used (e.g. if the range covers the whole function), return the last
# defined variable.
return_variables = list(_find_needed_output_variables(
context,
nodes[0].parent,
nodes[-1].end_pos,
return_variables
)) or [return_variables[-1]] if return_variables else []
remaining_prefix, code_block = _suite_nodes_to_string(nodes, pos)
after_leaf = nodes[-1].get_next_leaf()
first, second = _split_prefix_at(after_leaf, until_pos[0])
code_block += first
code_block = dedent(code_block)
if not has_ending_return_stmt:
output_var_str = ', '.join(return_variables)
code_block += 'return ' + output_var_str + '\n'
# Check if we have to raise RefactoringError
_check_for_non_extractables(nodes[:-1] if has_ending_return_stmt else nodes)
decorator = ''
self_param = None
if is_bound_method:
if not function_is_staticmethod(context.tree_node):
function_param_names = context.get_value().get_param_names()
if len(function_param_names):
self_param = function_param_names[0].string_name
params = [p for p in params if p != self_param]
if function_is_classmethod(context.tree_node):
decorator = '@classmethod\n'
else:
code_block += '\n'
function_code = '%sdef %s(%s):\n%s' % (
decorator,
name,
', '.join(params if self_param is None else [self_param] + params),
indent_block(code_block)
)
function_call = '%s(%s)' % (
('' if self_param is None else self_param + '.') + name,
', '.join(params)
)
if is_expression:
replacement = function_call
else:
if has_ending_return_stmt:
replacement = 'return ' + function_call + '\n'
else:
replacement = output_var_str + ' = ' + function_call + '\n'
replacement_dct = _replace(nodes, replacement, function_code, pos,
insert_before_leaf, remaining_prefix)
if not is_expression:
replacement_dct[after_leaf] = second + after_leaf.value
file_to_node_changes = {path: replacement_dct}
return Refactoring(inference_state, file_to_node_changes)
def _check_for_non_extractables(nodes):
for n in nodes:
try:
children = n.children
except AttributeError:
if n.value == 'return':
raise RefactoringError(
'Can only extract return statements if they are at the end.')
if n.value == 'yield':
raise RefactoringError('Cannot extract yield statements.')
else:
_check_for_non_extractables(children)
def _is_name_input(module_context, names, first, last):
for name in names:
if name.api_type == 'param' or not name.parent_context.is_module():
if name.get_root_context() is not module_context:
return True
if name.start_pos is None or not (first <= name.start_pos < last):
return True
return False
def _find_inputs_and_outputs(module_context, context, nodes):
first = nodes[0].start_pos
last = nodes[-1].end_pos
inputs = []
outputs = []
for name in _find_non_global_names(nodes):
if name.is_definition():
if name not in outputs:
outputs.append(name.value)
else:
if name.value not in inputs:
name_definitions = context.goto(name, name.start_pos)
if not name_definitions \
or _is_name_input(module_context, name_definitions, first, last):
inputs.append(name.value)
# Check if outputs are really needed:
return inputs, outputs
def _find_non_global_names(nodes):
for node in nodes:
try:
children = node.children
except AttributeError:
if node.type == 'name':
yield node
else:
# We only want to check foo in foo.bar
if node.type == 'trailer' and node.children[0] == '.':
continue
yield from _find_non_global_names(children)
def _get_code_insertion_node(node, is_bound_method):
if not is_bound_method or function_is_staticmethod(node):
while node.parent.type != 'file_input':
node = node.parent
while node.parent.type in ('async_funcdef', 'decorated', 'async_stmt'):
node = node.parent
return node
def _find_needed_output_variables(context, search_node, at_least_pos, return_variables):
"""
Searches everything after at_least_pos in a node and checks if any of the
return_variables are used in there and returns those.
"""
for node in search_node.children:
if node.start_pos < at_least_pos:
continue
return_variables = set(return_variables)
for name in _find_non_global_names([node]):
if not name.is_definition() and name.value in return_variables:
return_variables.remove(name.value)
yield name.value
def _is_node_ending_return_stmt(node):
t = node.type
if t == 'simple_stmt':
return _is_node_ending_return_stmt(node.children[0])
return t == 'return_stmt'
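# Illustrative usage sketch, not part of the original module: extraction is
# normally driven through jedi's public ``Script`` API; the code and positions
# below are invented for demonstration.
if __name__ == '__main__':
    import jedi

    source = "def calc():\n    return 40 + 2\n"
    script = jedi.Script(source, path='example.py')

    # Extract the expression ``40 + 2`` (columns 11-17 on line 2) into a
    # variable named ``answer``.
    refactoring = script.extract_variable(
        line=2, column=11, until_line=2, until_column=17, new_name='answer')
    print(refactoring.get_diff())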

View File

@ -0,0 +1,29 @@
"""
To use Jedi completion in Python interpreter, add the following in your shell
setup (e.g., ``.bashrc``). This works only on Linux/Mac, because readline is
not available on Windows. If you still want Jedi autocompletion in your REPL,
just use IPython instead::
export PYTHONSTARTUP="$(python -m jedi repl)"
Then you will be able to use Jedi completer in your Python interpreter::
$ python
Python 3.9.2+ (default, Jul 20 2020, 22:15:08)
[GCC 4.6.1] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import os
>>> os.path.join('a', 'b').split().in<TAB> # doctest: +SKIP
..dex ..sert
"""
import jedi.utils
from jedi import __version__ as __jedi_version__
print('REPL completion using Jedi %s' % __jedi_version__)
jedi.utils.setup_readline(fuzzy=False)
del jedi
# Note: try not to do many things here, as it will contaminate global
# namespace of the interpreter.

View File

@ -0,0 +1,108 @@
"""
This module is here for string completions. This means mostly stuff where
strings are returned, like `foo = dict(bar=3); foo["ba` would complete to
`"bar"]`.
It however does the same for numbers. The difference between string completions
and other completions is mostly that this module doesn't return defined
names in a module, but pretty much an arbitrary string.
"""
import re
from jedi.inference.names import AbstractArbitraryName
from jedi.inference.helpers import infer_call_of_leaf
from jedi.api.classes import Completion
from jedi.parser_utils import cut_value_at_position
_sentinel = object()
class StringName(AbstractArbitraryName):
api_type = 'string'
is_value_name = False
def complete_dict(module_context, code_lines, leaf, position, string, fuzzy):
bracket_leaf = leaf
if bracket_leaf != '[':
bracket_leaf = leaf.get_previous_leaf()
cut_end_quote = ''
if string:
cut_end_quote = get_quote_ending(string, code_lines, position, invert_result=True)
if bracket_leaf == '[':
if string is None and leaf is not bracket_leaf:
string = cut_value_at_position(leaf, position)
context = module_context.create_context(bracket_leaf)
before_bracket_leaf = bracket_leaf.get_previous_leaf()
if before_bracket_leaf.type in ('atom', 'trailer', 'name'):
values = infer_call_of_leaf(context, before_bracket_leaf)
return list(_completions_for_dicts(
module_context.inference_state,
values,
'' if string is None else string,
cut_end_quote,
fuzzy=fuzzy,
))
return []
def _completions_for_dicts(inference_state, dicts, literal_string, cut_end_quote, fuzzy):
for dict_key in sorted(_get_python_keys(dicts), key=lambda x: repr(x)):
dict_key_str = _create_repr_string(literal_string, dict_key)
if dict_key_str.startswith(literal_string):
name = StringName(inference_state, dict_key_str[:-len(cut_end_quote) or None])
yield Completion(
inference_state,
name,
stack=None,
like_name_length=len(literal_string),
is_fuzzy=fuzzy
)
def _create_repr_string(literal_string, dict_key):
if not isinstance(dict_key, (str, bytes)) or not literal_string:
return repr(dict_key)
r = repr(dict_key)
prefix, quote = _get_string_prefix_and_quote(literal_string)
if quote is None:
return r
if quote == r[0]:
return prefix + r
return prefix + quote + r[1:-1] + quote
def _get_python_keys(dicts):
for dct in dicts:
if dct.array_type == 'dict':
for key in dct.get_key_values():
dict_key = key.get_safe_value(default=_sentinel)
if dict_key is not _sentinel:
yield dict_key
def _get_string_prefix_and_quote(string):
match = re.match(r'(\w*)("""|\'{3}|"|\')', string)
if match is None:
return None, None
return match.group(1), match.group(2)
def _matches_quote_at_position(code_lines, quote, position):
string = code_lines[position[0] - 1][position[1]:position[1] + len(quote)]
return string == quote
def get_quote_ending(string, code_lines, position, invert_result=False):
_, quote = _get_string_prefix_and_quote(string)
if quote is None:
return ''
# Add a quote only if it's not already there.
if _matches_quote_at_position(code_lines, quote, position) != invert_result:
return ''
return quote
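# Illustrative sketch, not part of the original module: the dict-key
# completion implemented above surfaces through the normal completion API.
# The snippet below is a made-up example based on the module docstring.
if __name__ == '__main__':
    import jedi

    source = "foo = dict(bar=3)\nfoo['ba"
    script = jedi.Script(source, path='example.py')
    completions = script.complete(line=2, column=len("foo['ba"))
    print([c.name for c in completions])  # expected to offer the 'bar' key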

View File

@ -0,0 +1,115 @@
"""
This caching is very important for speed and memory optimizations. There's
nothing really spectacular, just some decorators. The following cache types are
available:
- ``time_cache`` can be used to cache something for just a limited time span,
which can be useful if there's user interaction and the user cannot react
faster than a certain time.
This module is one of the reasons why |jedi| is not thread-safe. As you can see
there are global variables, which are holding the cache information. Some of
these variables are being cleaned after every API usage.
"""
import time
from functools import wraps
from typing import Any, Dict, Tuple
from jedi import settings
from parso.cache import parser_cache
_time_caches: Dict[str, Dict[Any, Tuple[float, Any]]] = {}
def clear_time_caches(delete_all: bool = False) -> None:
""" Jedi caches many things, that should be completed after each completion
finishes.
:param delete_all: Deletes also the cache that is normally not deleted,
like parser cache, which is important for faster parsing.
"""
global _time_caches
if delete_all:
for cache in _time_caches.values():
cache.clear()
parser_cache.clear()
else:
# normally just kill the expired entries, not all
for tc in _time_caches.values():
# check time_cache for expired entries
for key, (t, value) in list(tc.items()):
if t < time.time():
# delete expired entries
del tc[key]
def signature_time_cache(time_add_setting):
"""
This decorator works as follows: Call it with a setting and after that
use the function with a callable that returns the key.
But: This function is only called if the key is not available. After a
certain amount of time (`time_add_setting`) the cache is invalid.
If the given key is None, the function will not be cached.
"""
def _temp(key_func):
dct = {}
_time_caches[time_add_setting] = dct
def wrapper(*args, **kwargs):
generator = key_func(*args, **kwargs)
key = next(generator)
try:
expiry, value = dct[key]
if expiry > time.time():
return value
except KeyError:
pass
value = next(generator)
time_add = getattr(settings, time_add_setting)
if key is not None:
dct[key] = time.time() + time_add, value
return value
return wrapper
return _temp
def time_cache(seconds):
def decorator(func):
cache = {}
@wraps(func)
def wrapper(*args, **kwargs):
key = (args, frozenset(kwargs.items()))
try:
created, result = cache[key]
if time.time() < created + seconds:
return result
except KeyError:
pass
result = func(*args, **kwargs)
cache[key] = time.time(), result
return result
wrapper.clear_cache = lambda: cache.clear()
return wrapper
return decorator
def memoize_method(method):
"""A normal memoize function."""
@wraps(method)
def wrapper(self, *args, **kwargs):
cache_dict = self.__dict__.setdefault('_memoize_method_dct', {})
dct = cache_dict.setdefault(method, {})
key = (args, frozenset(kwargs.items()))
try:
return dct[key]
except KeyError:
result = method(self, *args, **kwargs)
dct[key] = result
return result
return wrapper
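# Illustrative usage sketch, not part of the original module: how the
# decorators above are meant to be applied. The functions and timings below
# are invented for demonstration.
if __name__ == '__main__':
    @time_cache(seconds=2)
    def slow_square(x):
        print('computing', x)
        return x * x

    slow_square(3)  # prints "computing 3" and caches the result
    slow_square(3)  # served from the cache for roughly two seconds

    class Example:
        @memoize_method
        def double(self, x):
            return 2 * x

    e = Example()
    assert e.double(21) == 42
    assert e.double(21) == 42  # second call hits the per-instance cache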

View File

@ -0,0 +1,24 @@
from contextlib import contextmanager
@contextmanager
def monkeypatch(obj, attribute_name, new_value):
"""
Like pytest's monkeypatch, but as a context manager.
"""
old_value = getattr(obj, attribute_name)
try:
setattr(obj, attribute_name, new_value)
yield
finally:
setattr(obj, attribute_name, old_value)
def indent_block(text, indention=' '):
"""This function indents a text block with a default of four spaces."""
temp = ''
while text and text[-1] == '\n':
temp += text[-1]
text = text[:-1]
lines = text.split('\n')
return '\n'.join(map(lambda s: indention + s, lines)) + temp
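# Illustrative sketch, not part of the original module: behaviour of the two
# helpers above, shown with made-up values.
if __name__ == '__main__':
    import sys

    assert indent_block('a\nb\n') == '    a\n    b\n'
    assert indent_block('x', indention='  ') == '  x'

    with monkeypatch(sys, 'platform', 'illustrative-platform'):
        assert sys.platform == 'illustrative-platform'
    assert sys.platform != 'illustrative-platform'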

View File

@ -0,0 +1,132 @@
import os
import time
from contextlib import contextmanager
from typing import Callable, Optional
_inited = False
def _lazy_colorama_init():
"""
Lazily init colorama if necessary, not to screw up stdout if debugging is
not enabled.
This version of the function does nothing.
"""
try:
if os.name == 'nt':
# Does not work on Windows, as pyreadline and colorama interfere
raise ImportError
else:
# Use colorama for nicer console output.
from colorama import Fore, init # type: ignore[import]
from colorama import initialise
def _lazy_colorama_init(): # noqa: F811
"""
Lazily init colorama if necessary, so as not to screw up stdout if
debugging is not enabled.
This version of the function does init colorama.
"""
global _inited
if not _inited:
# pytest resets the stream at the end - causes troubles. Since
# after every output the stream is reset automatically we don't
# need this.
initialise.atexit_done = True
try:
init(strip=False)
except Exception:
# Colorama fails with initializing under vim and is buggy in
# version 0.3.6.
pass
_inited = True
except ImportError:
class Fore: # type: ignore[no-redef]
RED = ''
GREEN = ''
YELLOW = ''
MAGENTA = ''
RESET = ''
BLUE = ''
NOTICE = object()
WARNING = object()
SPEED = object()
enable_speed = False
enable_warning = False
enable_notice = False
# callback, interface: level, str
debug_function: Optional[Callable[[str, str], None]] = None
_debug_indent = 0
_start_time = time.time()
def reset_time():
global _start_time, _debug_indent
_start_time = time.time()
_debug_indent = 0
def increase_indent(func):
"""Decorator for makin """
def wrapper(*args, **kwargs):
with increase_indent_cm():
return func(*args, **kwargs)
return wrapper
@contextmanager
def increase_indent_cm(title=None, color='MAGENTA'):
global _debug_indent
if title:
dbg('Start: ' + title, color=color)
_debug_indent += 1
try:
yield
finally:
_debug_indent -= 1
if title:
dbg('End: ' + title, color=color)
def dbg(message, *args, color='GREEN'):
""" Looks at the stack, to see if a debug message should be printed. """
assert color
if debug_function and enable_notice:
i = ' ' * _debug_indent
_lazy_colorama_init()
debug_function(color, i + 'dbg: ' + message % tuple(repr(a) for a in args))
def warning(message, *args, format=True):
if debug_function and enable_warning:
i = ' ' * _debug_indent
if format:
message = message % tuple(repr(a) for a in args)
debug_function('RED', i + 'warning: ' + message)
def speed(name):
if debug_function and enable_speed:
now = time.time()
i = ' ' * _debug_indent
debug_function('YELLOW', i + 'speed: ' + '%s %s' % (name, now - _start_time))
def print_to_stdout(color, str_out):
"""
The default debug function that prints to standard out.
:param str color: A string that is an attribute of ``colorama.Fore``.
"""
col = getattr(Fore, color)
_lazy_colorama_init()
print(col + str_out + Fore.RESET)
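# Illustrative sketch, not part of the original module: wiring the
# module-level switches above by hand routes debug output through
# print_to_stdout. The public entry point for this is jedi.set_debug_function().
if __name__ == '__main__':
    debug_function = print_to_stdout
    enable_notice = True
    enable_warning = True
    with increase_indent_cm('demo'):
        dbg('answer is %s', 42)
        warning('something looks odd: %s', 'demo value')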

View File

@ -0,0 +1,83 @@
import os
from parso import file_io
class AbstractFolderIO:
def __init__(self, path):
self.path = path
def get_base_name(self):
raise NotImplementedError
def list(self):
raise NotImplementedError
def get_file_io(self, name):
raise NotImplementedError
def get_parent_folder(self):
raise NotImplementedError
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.path)
class FolderIO(AbstractFolderIO):
def get_base_name(self):
return os.path.basename(self.path)
def list(self):
return os.listdir(self.path)
def get_file_io(self, name):
return FileIO(os.path.join(self.path, name))
def get_parent_folder(self):
return FolderIO(os.path.dirname(self.path))
def walk(self):
for root, dirs, files in os.walk(self.path):
root_folder_io = FolderIO(root)
original_folder_ios = [FolderIO(os.path.join(root, d)) for d in dirs]
modified_folder_ios = list(original_folder_ios)
yield (
root_folder_io,
modified_folder_ios,
[FileIO(os.path.join(root, f)) for f in files],
)
modified_iterator = iter(reversed(modified_folder_ios))
current = next(modified_iterator, None)
i = len(original_folder_ios)
for folder_io in reversed(original_folder_ios):
i -= 1 # Basically enumerate but reversed
if current is folder_io:
current = next(modified_iterator, None)
else:
del dirs[i]
class FileIOFolderMixin:
def get_parent_folder(self):
return FolderIO(os.path.dirname(self.path))
class ZipFileIO(file_io.KnownContentFileIO, FileIOFolderMixin):
"""For .zip and .egg archives"""
def __init__(self, path, code, zip_path):
super().__init__(path, code)
self._zip_path = zip_path
def get_last_modified(self):
try:
return os.path.getmtime(self._zip_path)
except (FileNotFoundError, PermissionError, NotADirectoryError):
return None
class FileIO(file_io.FileIO, FileIOFolderMixin):
pass
class KnownContentFileIO(file_io.KnownContentFileIO, FileIOFolderMixin):
pass
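# Illustrative sketch, not part of the original module: FolderIO.walk mirrors
# os.walk, including in-place pruning of the folder list to skip subtrees.
# The directory filter below is arbitrary.
if __name__ == '__main__':
    for folder_io, folder_ios, file_ios in FolderIO('.').walk():
        # Removing entries here prevents walk() from descending into them,
        # just like deleting from the ``dirs`` list of os.walk.
        folder_ios[:] = [f for f in folder_ios
                         if not f.get_base_name().startswith('.')]
        print(folder_io.path, [f.path for f in file_ios][:3])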

View File

@ -0,0 +1,198 @@
"""
Type inference of Python code in |jedi| is based on three assumptions:
* The code uses as few side effects as possible. Jedi understands certain
list/tuple/set modifications, but there's no guarantee that Jedi detects
everything (list.append in different modules for example).
* No magic is being used:
- metaclasses
- ``setattr()`` / ``__import__()``
- writing to ``globals()``, ``locals()``, ``object.__dict__``
* The programmer is not a total dick, e.g. like `this
<https://github.com/davidhalter/jedi/issues/24>`_ :-)
The actual algorithm is based on a principle I call lazy type inference. That
said, the typical entry point for static analysis is calling
``infer_expr_stmt``. There's separate logic for autocompletion in the API; the
inference_state is all about inferring an expression.
TODO this paragraph is not what jedi does anymore, it's similar, but not the
same.
Now you need to understand what follows after ``infer_expr_stmt``. Let's
make an example::
import datetime
datetime.date.toda# <-- cursor here
First of all, this module doesn't care about completion. It really just cares
about ``datetime.date``. At the end of the procedure ``infer_expr_stmt`` will
return the ``date`` class.
To *visualize* this (simplified):
- ``InferenceState.infer_expr_stmt`` doesn't do much, because there's no assignment.
- ``Context.infer_node`` cares for resolving the dotted path
- ``InferenceState.find_types`` searches for global definitions of datetime, which
it finds in the definition of an import, by scanning the syntax tree.
- Using the import logic, the datetime module is found.
- Now ``find_types`` is called again by ``infer_node`` to find ``date``
inside the datetime module.
Now what would happen if we wanted ``datetime.date.foo.bar``? Two more
calls to ``find_types``. However the second call would be ignored, because the
first one would return nothing (there's no foo attribute in ``date``).
What if the import would contain another ``ExprStmt`` like this::
from foo import bar
Date = bar.baz
Well... You get it. Just another ``infer_expr_stmt`` recursion. It's really
easy. Python can obviously get way more complicated than this. To understand
tuple assignments, list comprehensions and everything else, a lot more code had
to be written.
Jedi has been tested very well, so you can just start modifying code. It's best
to write your own test first for your "new" feature. Don't be scared of
breaking stuff. As long as the tests pass, you're most likely to be fine.
I need to mention now that lazy type inference is really good because it
only *infers* what needs to be *inferred*. All the statements and modules
that are not used are just being ignored.
"""
import parso
from jedi.file_io import FileIO
from jedi import debug
from jedi import settings
from jedi.inference import imports
from jedi.inference import recursion
from jedi.inference.cache import inference_state_function_cache
from jedi.inference import helpers
from jedi.inference.names import TreeNameDefinition
from jedi.inference.base_value import ContextualizedNode, \
ValueSet, iterate_values
from jedi.inference.value import ClassValue, FunctionValue
from jedi.inference.syntax_tree import infer_expr_stmt, \
check_tuple_assignments, tree_name_to_values
from jedi.inference.imports import follow_error_node_imports_if_possible
from jedi.plugins import plugin_manager
class InferenceState:
def __init__(self, project, environment=None, script_path=None):
if environment is None:
environment = project.get_environment()
self.environment = environment
self.script_path = script_path
self.compiled_subprocess = environment.get_inference_state_subprocess(self)
self.grammar = environment.get_grammar()
self.latest_grammar = parso.load_grammar(version='3.7')
self.memoize_cache = {} # for memoize decorators
self.module_cache = imports.ModuleCache() # does the job of `sys.modules`.
self.stub_module_cache = {} # Dict[Tuple[str, ...], Optional[ModuleValue]]
self.compiled_cache = {} # see `inference.compiled.create()`
self.inferred_element_counts = {}
self.mixed_cache = {} # see `inference.compiled.mixed._create()`
self.analysis = []
self.dynamic_params_depth = 0
self.is_analysis = False
self.project = project
self.access_cache = {}
self.allow_descriptor_getattr = False
self.flow_analysis_enabled = True
self.reset_recursion_limitations()
def import_module(self, import_names, sys_path=None, prefer_stubs=True):
return imports.import_module_by_names(
self, import_names, sys_path, prefer_stubs=prefer_stubs)
@staticmethod
@plugin_manager.decorate()
def execute(value, arguments):
debug.dbg('execute: %s %s', value, arguments)
with debug.increase_indent_cm():
value_set = value.py__call__(arguments=arguments)
debug.dbg('execute result: %s in %s', value_set, value)
return value_set
# mypy doesn't support decorated properties (https://github.com/python/mypy/issues/1362)
@property # type: ignore[misc]
@inference_state_function_cache()
def builtins_module(self):
module_name = 'builtins'
builtins_module, = self.import_module((module_name,), sys_path=())
return builtins_module
@property # type: ignore[misc]
@inference_state_function_cache()
def typing_module(self):
typing_module, = self.import_module(('typing',))
return typing_module
def reset_recursion_limitations(self):
self.recursion_detector = recursion.RecursionDetector()
self.execution_recursion_detector = recursion.ExecutionRecursionDetector(self)
def get_sys_path(self, **kwargs):
"""Convenience function"""
return self.project._get_sys_path(self, **kwargs)
def infer(self, context, name):
def_ = name.get_definition(import_name_always=True)
if def_ is not None:
type_ = def_.type
is_classdef = type_ == 'classdef'
if is_classdef or type_ == 'funcdef':
if is_classdef:
c = ClassValue(self, context, name.parent)
else:
c = FunctionValue.from_context(context, name.parent)
return ValueSet([c])
if type_ == 'expr_stmt':
is_simple_name = name.parent.type not in ('power', 'trailer')
if is_simple_name:
return infer_expr_stmt(context, def_, name)
if type_ == 'for_stmt':
container_types = context.infer_node(def_.children[3])
cn = ContextualizedNode(context, def_.children[3])
for_types = iterate_values(container_types, cn)
n = TreeNameDefinition(context, name)
return check_tuple_assignments(n, for_types)
if type_ in ('import_from', 'import_name'):
return imports.infer_import(context, name)
if type_ == 'with_stmt':
return tree_name_to_values(self, context, name)
elif type_ == 'param':
return context.py__getattribute__(name.value, position=name.end_pos)
elif type_ == 'namedexpr_test':
return context.infer_node(def_)
else:
result = follow_error_node_imports_if_possible(context, name)
if result is not None:
return result
return helpers.infer_call_of_leaf(context, name)
def parse_and_get_code(self, code=None, path=None,
use_latest_grammar=False, file_io=None, **kwargs):
if code is None:
if file_io is None:
file_io = FileIO(path)
code = file_io.read()
# We cannot just use parso, because it doesn't use errors='replace'.
code = parso.python_bytes_to_unicode(code, encoding='utf-8', errors='replace')
if len(code) > settings._cropped_file_size:
code = code[:settings._cropped_file_size]
grammar = self.latest_grammar if use_latest_grammar else self.grammar
return grammar.parse(code=code, path=path, file_io=file_io, **kwargs), code
def parse(self, *args, **kwargs):
return self.parse_and_get_code(*args, **kwargs)[0]
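# Illustrative sketch, not part of the original module: the docstring's
# ``datetime.date.toda`` example, driven through the public API that
# eventually ends up in InferenceState.infer().
if __name__ == '__main__':
    import jedi

    script = jedi.Script('import datetime\ndatetime.date.toda')
    names = [c.name for c in script.complete(line=2, column=len('datetime.date.toda'))]
    print(names)  # expected to contain 'today'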

View File

@ -0,0 +1,213 @@
"""
Module for static analysis.
"""
from parso.python import tree
from jedi import debug
from jedi.inference.helpers import is_string
CODES = {
'attribute-error': (1, AttributeError, 'Potential AttributeError.'),
'name-error': (2, NameError, 'Potential NameError.'),
'import-error': (3, ImportError, 'Potential ImportError.'),
'type-error-too-many-arguments': (4, TypeError, None),
'type-error-too-few-arguments': (5, TypeError, None),
'type-error-keyword-argument': (6, TypeError, None),
'type-error-multiple-values': (7, TypeError, None),
'type-error-star-star': (8, TypeError, None),
'type-error-star': (9, TypeError, None),
'type-error-operation': (10, TypeError, None),
'type-error-not-iterable': (11, TypeError, None),
'type-error-isinstance': (12, TypeError, None),
'type-error-not-subscriptable': (13, TypeError, None),
'value-error-too-many-values': (14, ValueError, None),
'value-error-too-few-values': (15, ValueError, None),
}
class Error:
def __init__(self, name, module_path, start_pos, message=None):
self.path = module_path
self._start_pos = start_pos
self.name = name
if message is None:
message = CODES[self.name][2]
self.message = message
@property
def line(self):
return self._start_pos[0]
@property
def column(self):
return self._start_pos[1]
@property
def code(self):
# The class name start
first = self.__class__.__name__[0]
return first + str(CODES[self.name][0])
def __str__(self):
return '%s:%s:%s: %s %s' % (self.path, self.line, self.column,
self.code, self.message)
def __eq__(self, other):
return (self.path == other.path and self.name == other.name
and self._start_pos == other._start_pos)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.path, self._start_pos, self.name))
def __repr__(self):
return '<%s %s: %s@%s,%s>' % (self.__class__.__name__,
self.name, self.path,
self._start_pos[0], self._start_pos[1])
class Warning(Error):
pass
def add(node_context, error_name, node, message=None, typ=Error, payload=None):
exception = CODES[error_name][1]
if _check_for_exception_catch(node_context, node, exception, payload):
return
# TODO this path is probably not right
module_context = node_context.get_root_context()
module_path = module_context.py__file__()
issue_instance = typ(error_name, module_path, node.start_pos, message)
debug.warning(str(issue_instance), format=False)
node_context.inference_state.analysis.append(issue_instance)
return issue_instance
def _check_for_setattr(instance):
"""
Check if there's any setattr method inside an instance. If so, return True.
"""
module = instance.get_root_context()
node = module.tree_node
if node is None:
# If it's a compiled module or doesn't have a tree_node
return False
try:
stmt_names = node.get_used_names()['setattr']
except KeyError:
return False
return any(node.start_pos < n.start_pos < node.end_pos
# Check if it's a function called setattr.
and not (n.parent.type == 'funcdef' and n.parent.name == n)
for n in stmt_names)
def add_attribute_error(name_context, lookup_value, name):
message = ('AttributeError: %s has no attribute %s.' % (lookup_value, name))
# Check for __getattr__/__getattribute__ existence and issue a warning
# instead of an error, if that happens.
typ = Error
if lookup_value.is_instance() and not lookup_value.is_compiled():
# TODO maybe make a warning for __getattr__/__getattribute__
if _check_for_setattr(lookup_value):
typ = Warning
payload = lookup_value, name
add(name_context, 'attribute-error', name, message, typ, payload)
def _check_for_exception_catch(node_context, jedi_name, exception, payload=None):
"""
Checks if a jedi object (e.g. `Statement`) sits inside a try/catch and
doesn't count as an error (if equal to `exception`).
Also checks `hasattr` for AttributeErrors and uses the `payload` to compare
it.
Returns True if the exception was caught.
"""
def check_match(cls, exception):
if not cls.is_class():
return False
for python_cls in exception.mro():
if cls.py__name__() == python_cls.__name__ \
and cls.parent_context.is_builtins_module():
return True
return False
def check_try_for_except(obj, exception):
# Only nodes in try
iterator = iter(obj.children)
for branch_type in iterator:
next(iterator) # The colon
suite = next(iterator)
if branch_type == 'try' \
and not (branch_type.start_pos < jedi_name.start_pos <= suite.end_pos):
return False
for node in obj.get_except_clause_tests():
if node is None:
return True # An exception block that catches everything.
else:
except_classes = node_context.infer_node(node)
for cls in except_classes:
from jedi.inference.value import iterable
if isinstance(cls, iterable.Sequence) and \
cls.array_type == 'tuple':
# multiple exceptions
for lazy_value in cls.py__iter__():
for typ in lazy_value.infer():
if check_match(typ, exception):
return True
else:
if check_match(cls, exception):
return True
def check_hasattr(node, suite):
try:
assert suite.start_pos <= jedi_name.start_pos < suite.end_pos
assert node.type in ('power', 'atom_expr')
base = node.children[0]
assert base.type == 'name' and base.value == 'hasattr'
trailer = node.children[1]
assert trailer.type == 'trailer'
arglist = trailer.children[1]
assert arglist.type == 'arglist'
from jedi.inference.arguments import TreeArguments
args = TreeArguments(node_context.inference_state, node_context, arglist)
unpacked_args = list(args.unpack())
# Arguments should be very simple
assert len(unpacked_args) == 2
# Check name
key, lazy_value = unpacked_args[1]
names = list(lazy_value.infer())
assert len(names) == 1 and is_string(names[0])
assert names[0].get_safe_value() == payload[1].value
# Check objects
key, lazy_value = unpacked_args[0]
objects = lazy_value.infer()
return payload[0] in objects
except AssertionError:
return False
obj = jedi_name
while obj is not None and not isinstance(obj, (tree.Function, tree.Class)):
if isinstance(obj, tree.Flow):
# try/except catch check
if obj.type == 'try_stmt' and check_try_for_except(obj, exception):
return True
# hasattr check
if exception == AttributeError and obj.type in ('if_stmt', 'while_stmt'):
if check_hasattr(obj.children[1], obj.children[3]):
return True
obj = obj.parent
return False
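# Illustrative sketch, not part of the original module: how the issue codes
# above are composed -- the first letter of the class name plus the numeric
# identifier from CODES. Paths and positions are made up.
if __name__ == '__main__':
    err = Error('attribute-error', 'example.py', (3, 7))
    assert err.code == 'E1'
    assert str(err) == 'example.py:3:7: E1 Potential AttributeError.'

    warn = Warning('import-error', 'example.py', (1, 0))
    assert warn.code == 'W3'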

View File

@ -0,0 +1,335 @@
import re
from itertools import zip_longest
from parso.python import tree
from jedi import debug
from jedi.inference.utils import PushBackIterator
from jedi.inference import analysis
from jedi.inference.lazy_value import LazyKnownValue, LazyKnownValues, \
LazyTreeValue, get_merged_lazy_value
from jedi.inference.names import ParamName, TreeNameDefinition, AnonymousParamName
from jedi.inference.base_value import NO_VALUES, ValueSet, ContextualizedNode
from jedi.inference.value import iterable
from jedi.inference.cache import inference_state_as_method_param_cache
def try_iter_content(types, depth=0):
"""Helper method for static analysis."""
if depth > 10:
# It's possible that a loop has references on itself (especially with
# CompiledValue). Therefore don't loop infinitely.
return
for typ in types:
try:
f = typ.py__iter__
except AttributeError:
pass
else:
for lazy_value in f():
try_iter_content(lazy_value.infer(), depth + 1)
class ParamIssue(Exception):
pass
def repack_with_argument_clinic(clinic_string):
"""
Transforms a function or method with arguments to the signature that is
given as an argument clinic notation.
Argument clinic is part of CPython and used for all the functions that are
implemented in C (Python 3.7):
str.split.__text_signature__
# Results in: '($self, /, sep=None, maxsplit=-1)'
"""
def decorator(func):
def wrapper(value, arguments):
try:
args = tuple(iterate_argument_clinic(
value.inference_state,
arguments,
clinic_string,
))
except ParamIssue:
return NO_VALUES
else:
return func(value, *args)
return wrapper
return decorator
def iterate_argument_clinic(inference_state, arguments, clinic_string):
"""Uses a list with argument clinic information (see PEP 436)."""
clinic_args = list(_parse_argument_clinic(clinic_string))
iterator = PushBackIterator(arguments.unpack())
for i, (name, optional, allow_kwargs, stars) in enumerate(clinic_args):
if stars == 1:
lazy_values = []
for key, argument in iterator:
if key is not None:
iterator.push_back((key, argument))
break
lazy_values.append(argument)
yield ValueSet([iterable.FakeTuple(inference_state, lazy_values)])
lazy_values
continue
elif stars == 2:
raise NotImplementedError()
key, argument = next(iterator, (None, None))
if key is not None:
debug.warning('Keyword arguments in argument clinic are currently not supported.')
raise ParamIssue
if argument is None and not optional:
debug.warning('TypeError: %s expected at least %s arguments, got %s',
name, len(clinic_args), i)
raise ParamIssue
value_set = NO_VALUES if argument is None else argument.infer()
if not value_set and not optional:
# For the stdlib we always want values. If we don't get them,
# that's ok, maybe something is too hard to resolve, however,
# we will not proceed with the type inference of that function.
debug.warning('argument_clinic "%s" not resolvable.', name)
raise ParamIssue
yield value_set
def _parse_argument_clinic(string):
allow_kwargs = False
optional = False
while string:
# Optional arguments have to begin with a bracket. And should always be
# at the end of the arguments. This is therefore not a proper argument
clinic implementation. `range()` for example allows an optional start
# value at the beginning.
match = re.match(r'(?:(?:(\[),? ?|, ?|)(\**\w+)|, ?/)\]*', string)
string = string[len(match.group(0)):]
if not match.group(2): # A slash -> allow named arguments
allow_kwargs = True
continue
optional = optional or bool(match.group(1))
word = match.group(2)
stars = word.count('*')
word = word[stars:]
yield (word, optional, allow_kwargs, stars)
if stars:
allow_kwargs = True
class _AbstractArgumentsMixin:
def unpack(self, funcdef=None):
raise NotImplementedError
def get_calling_nodes(self):
return []
class AbstractArguments(_AbstractArgumentsMixin):
context = None
argument_node = None
trailer = None
def unpack_arglist(arglist):
if arglist is None:
return
if arglist.type != 'arglist' and not (
arglist.type == 'argument' and arglist.children[0] in ('*', '**')):
yield 0, arglist
return
iterator = iter(arglist.children)
for child in iterator:
if child == ',':
continue
elif child in ('*', '**'):
c = next(iterator, None)
assert c is not None
yield len(child.value), c
elif child.type == 'argument' and \
child.children[0] in ('*', '**'):
assert len(child.children) == 2
yield len(child.children[0].value), child.children[1]
else:
yield 0, child
class TreeArguments(AbstractArguments):
def __init__(self, inference_state, context, argument_node, trailer=None):
"""
:param argument_node: May be an argument_node or a list of nodes.
"""
self.argument_node = argument_node
self.context = context
self._inference_state = inference_state
self.trailer = trailer # Can be None, e.g. in a class definition.
@classmethod
@inference_state_as_method_param_cache()
def create_cached(cls, *args, **kwargs):
return cls(*args, **kwargs)
def unpack(self, funcdef=None):
named_args = []
for star_count, el in unpack_arglist(self.argument_node):
if star_count == 1:
arrays = self.context.infer_node(el)
iterators = [_iterate_star_args(self.context, a, el, funcdef)
for a in arrays]
for values in list(zip_longest(*iterators)):
yield None, get_merged_lazy_value(
[v for v in values if v is not None]
)
elif star_count == 2:
arrays = self.context.infer_node(el)
for dct in arrays:
yield from _star_star_dict(self.context, dct, el, funcdef)
else:
if el.type == 'argument':
c = el.children
if len(c) == 3: # Keyword argument.
named_args.append((c[0].value, LazyTreeValue(self.context, c[2]),))
else: # Generator comprehension.
# Include the brackets with the parent.
sync_comp_for = el.children[1]
if sync_comp_for.type == 'comp_for':
sync_comp_for = sync_comp_for.children[1]
comp = iterable.GeneratorComprehension(
self._inference_state,
defining_context=self.context,
sync_comp_for_node=sync_comp_for,
entry_node=el.children[0],
)
yield None, LazyKnownValue(comp)
else:
yield None, LazyTreeValue(self.context, el)
# Reordering arguments is necessary, because star args sometimes appear
# after named argument, but in the actual order it's prepended.
yield from named_args
def _as_tree_tuple_objects(self):
for star_count, argument in unpack_arglist(self.argument_node):
default = None
if argument.type == 'argument':
if len(argument.children) == 3: # Keyword argument.
argument, default = argument.children[::2]
yield argument, default, star_count
def iter_calling_names_with_star(self):
for name, default, star_count in self._as_tree_tuple_objects():
# TODO this function is a bit strange. probably refactor?
if not star_count or not isinstance(name, tree.Name):
continue
yield TreeNameDefinition(self.context, name)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.argument_node)
def get_calling_nodes(self):
old_arguments_list = []
arguments = self
while arguments not in old_arguments_list:
if not isinstance(arguments, TreeArguments):
break
old_arguments_list.append(arguments)
for calling_name in reversed(list(arguments.iter_calling_names_with_star())):
names = calling_name.goto()
if len(names) != 1:
break
if isinstance(names[0], AnonymousParamName):
# Dynamic parameters should not have calling nodes, because
# they are dynamic and extremely random.
return []
if not isinstance(names[0], ParamName):
break
executed_param_name = names[0].get_executed_param_name()
arguments = executed_param_name.arguments
break
if arguments.argument_node is not None:
return [ContextualizedNode(arguments.context, arguments.argument_node)]
if arguments.trailer is not None:
return [ContextualizedNode(arguments.context, arguments.trailer)]
return []
class ValuesArguments(AbstractArguments):
def __init__(self, values_list):
self._values_list = values_list
def unpack(self, funcdef=None):
for values in self._values_list:
yield None, LazyKnownValues(values)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._values_list)
class TreeArgumentsWrapper(_AbstractArgumentsMixin):
def __init__(self, arguments):
self._wrapped_arguments = arguments
@property
def context(self):
return self._wrapped_arguments.context
@property
def argument_node(self):
return self._wrapped_arguments.argument_node
@property
def trailer(self):
return self._wrapped_arguments.trailer
def unpack(self, func=None):
raise NotImplementedError
def get_calling_nodes(self):
return self._wrapped_arguments.get_calling_nodes()
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._wrapped_arguments)
def _iterate_star_args(context, array, input_node, funcdef=None):
if not array.py__getattribute__('__iter__'):
if funcdef is not None:
# TODO this funcdef should not be needed.
m = "TypeError: %s() argument after * must be a sequence, not %s" \
% (funcdef.name.value, array)
analysis.add(context, 'type-error-star', input_node, message=m)
try:
iter_ = array.py__iter__
except AttributeError:
pass
else:
yield from iter_()
def _star_star_dict(context, array, input_node, funcdef):
from jedi.inference.value.instance import CompiledInstance
if isinstance(array, CompiledInstance) and array.name.string_name == 'dict':
# For now ignore this case. In the future add proper iterators and just
# make one call without crazy isinstance checks.
return {}
elif isinstance(array, iterable.Sequence) and array.array_type == 'dict':
return array.exact_key_items()
else:
if funcdef is not None:
m = "TypeError: %s argument after ** must be a mapping, not %s" \
% (funcdef.name.value, array)
analysis.add(context, 'type-error-star-star', input_node, message=m)
return {}
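# Illustrative sketch, not part of the original module: what the argument
# clinic parser above yields, using the notation described in
# repack_with_argument_clinic's docstring.
if __name__ == '__main__':
    # (name, optional, allow_kwargs, stars) tuples
    assert list(_parse_argument_clinic('obj, type, /')) == [
        ('obj', False, False, 0),
        ('type', False, False, 0),
    ]
    # A bracket marks the start of the optional arguments.
    assert list(_parse_argument_clinic('iterator[, default]')) == [
        ('iterator', False, False, 0),
        ('default', True, False, 0),
    ]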

View File

@ -0,0 +1,558 @@
"""
Values are the "values" that Python would return. However Values are at the
same time also the "values" that a user is currently sitting in.
A ValueSet is typically used to specify the return of a function or any other
static analysis operation. In jedi there are always multiple returns and not
just one.
"""
from functools import reduce
from operator import add
from itertools import zip_longest
from parso.python.tree import Name
from jedi import debug
from jedi.parser_utils import clean_scope_docstring
from jedi.inference.helpers import SimpleGetItemNotFound
from jedi.inference.utils import safe_property
from jedi.inference.cache import inference_state_as_method_param_cache
from jedi.cache import memoize_method
sentinel = object()
class HasNoContext(Exception):
pass
class HelperValueMixin:
def get_root_context(self):
value = self
if value.parent_context is None:
return value.as_context()
while True:
if value.parent_context is None:
return value
value = value.parent_context
def execute(self, arguments):
return self.inference_state.execute(self, arguments=arguments)
def execute_with_values(self, *value_list):
from jedi.inference.arguments import ValuesArguments
arguments = ValuesArguments([ValueSet([value]) for value in value_list])
return self.inference_state.execute(self, arguments)
def execute_annotation(self):
return self.execute_with_values()
def gather_annotation_classes(self):
return ValueSet([self])
def merge_types_of_iterate(self, contextualized_node=None, is_async=False):
return ValueSet.from_sets(
lazy_value.infer()
for lazy_value in self.iterate(contextualized_node, is_async)
)
def _get_value_filters(self, name_or_str):
origin_scope = name_or_str if isinstance(name_or_str, Name) else None
yield from self.get_filters(origin_scope=origin_scope)
# This covers the case where stub files are incomplete.
if self.is_stub():
from jedi.inference.gradual.conversion import convert_values
for c in convert_values(ValueSet({self})):
yield from c.get_filters()
def goto(self, name_or_str, name_context=None, analysis_errors=True):
from jedi.inference import finder
filters = self._get_value_filters(name_or_str)
names = finder.filter_name(filters, name_or_str)
debug.dbg('context.goto %s in (%s): %s', name_or_str, self, names)
return names
def py__getattribute__(self, name_or_str, name_context=None, position=None,
analysis_errors=True):
"""
:param position: Position of the last statement -> tuple of line, column
"""
if name_context is None:
name_context = self
names = self.goto(name_or_str, name_context, analysis_errors)
values = ValueSet.from_sets(name.infer() for name in names)
if not values:
n = name_or_str.value if isinstance(name_or_str, Name) else name_or_str
values = self.py__getattribute__alternatives(n)
if not names and not values and analysis_errors:
if isinstance(name_or_str, Name):
from jedi.inference import analysis
analysis.add_attribute_error(
name_context, self, name_or_str)
debug.dbg('context.names_to_types: %s -> %s', names, values)
return values
def py__await__(self):
await_value_set = self.py__getattribute__("__await__")
if not await_value_set:
debug.warning('Tried to run __await__ on value %s', self)
return await_value_set.execute_with_values()
def py__name__(self):
return self.name.string_name
def iterate(self, contextualized_node=None, is_async=False):
debug.dbg('iterate %s', self)
if is_async:
from jedi.inference.lazy_value import LazyKnownValues
# TODO if no __aiter__ values are there, error should be:
# TypeError: 'async for' requires an object with __aiter__ method, got int
return iter([
LazyKnownValues(
self.py__getattribute__('__aiter__').execute_with_values()
.py__getattribute__('__anext__').execute_with_values()
.py__getattribute__('__await__').execute_with_values()
.py__stop_iteration_returns()
) # noqa: E124
])
return self.py__iter__(contextualized_node)
def is_sub_class_of(self, class_value):
with debug.increase_indent_cm('subclass matching of %s <=> %s' % (self, class_value),
color='BLUE'):
for cls in self.py__mro__():
if cls.is_same_class(class_value):
debug.dbg('matched subclass True', color='BLUE')
return True
debug.dbg('matched subclass False', color='BLUE')
return False
def is_same_class(self, class2):
# Class matching should prefer comparisons that are not this function.
if type(class2).is_same_class != HelperValueMixin.is_same_class:
return class2.is_same_class(self)
return self == class2
@memoize_method
def as_context(self, *args, **kwargs):
return self._as_context(*args, **kwargs)
class Value(HelperValueMixin):
"""
To be implemented by subclasses.
"""
tree_node = None
# Possible values: None, tuple, list, dict and set. Here to deal with these
# very important containers.
array_type = None
api_type = 'not_defined_please_report_bug'
def __init__(self, inference_state, parent_context=None):
self.inference_state = inference_state
self.parent_context = parent_context
def py__getitem__(self, index_value_set, contextualized_node):
from jedi.inference import analysis
# TODO this value is probably not right.
analysis.add(
contextualized_node.context,
'type-error-not-subscriptable',
contextualized_node.node,
message="TypeError: '%s' object is not subscriptable" % self
)
return NO_VALUES
def py__simple_getitem__(self, index):
raise SimpleGetItemNotFound
def py__iter__(self, contextualized_node=None):
if contextualized_node is not None:
from jedi.inference import analysis
analysis.add(
contextualized_node.context,
'type-error-not-iterable',
contextualized_node.node,
message="TypeError: '%s' object is not iterable" % self)
return iter([])
def py__next__(self, contextualized_node=None):
return self.py__iter__(contextualized_node)
def get_signatures(self):
return []
def is_class(self):
return False
def is_class_mixin(self):
return False
def is_instance(self):
return False
def is_function(self):
return False
def is_module(self):
return False
def is_namespace(self):
return False
def is_compiled(self):
return False
def is_bound_method(self):
return False
def is_builtins_module(self):
return False
def py__bool__(self):
"""
Since Wrapper is a super class for classes, functions and modules,
the return value will always be true.
"""
return True
def py__doc__(self):
try:
self.tree_node.get_doc_node
except AttributeError:
return ''
else:
return clean_scope_docstring(self.tree_node)
def get_safe_value(self, default=sentinel):
if default is sentinel:
raise ValueError("There exists no safe value for value %s" % self)
return default
def execute_operation(self, other, operator):
debug.warning("%s not possible between %s and %s", operator, self, other)
return NO_VALUES
def py__call__(self, arguments):
debug.warning("no execution possible %s", self)
return NO_VALUES
def py__stop_iteration_returns(self):
debug.warning("Not possible to return the stop iterations of %s", self)
return NO_VALUES
def py__getattribute__alternatives(self, name_or_str):
"""
For now a way to add values in cases like __getattr__.
"""
return NO_VALUES
def py__get__(self, instance, class_value):
debug.warning("No __get__ defined on %s", self)
return ValueSet([self])
def py__get__on_class(self, calling_instance, instance, class_value):
return NotImplemented
def get_qualified_names(self):
# Returns Optional[Tuple[str, ...]]
return None
def is_stub(self):
# The root value knows if it's a stub or not.
return self.parent_context.is_stub()
def _as_context(self):
raise HasNoContext
@property
def name(self):
raise NotImplementedError
def get_type_hint(self, add_class_info=True):
return None
def infer_type_vars(self, value_set):
"""
When the current instance represents a type annotation, this method
tries to find information about undefined type vars and returns a dict
from type var name to value set.
This is for example important to understand what `iter([1])` returns.
According to typeshed, `iter` returns an `Iterator[_T]`:
def iter(iterable: Iterable[_T]) -> Iterator[_T]: ...
This function would generate `int` for `_T` in this case, because it
unpacks the `Iterable`.
Parameters
----------
`self`: represents the annotation of the current parameter to infer the
value for. In the above example, this would initially be the
`Iterable[_T]` of the `iterable` parameter and then, when recursing,
just the `_T` generic parameter.
`value_set`: represents the actual argument passed to the parameter
we're inferring for, or (for recursive calls) their types. In the
above example this would first be the representation of the list
`[1]` and then, when recursing, just of `1`.
"""
return {}
def iterate_values(values, contextualized_node=None, is_async=False):
"""
Calls `iterate`, on all values but ignores the ordering and just returns
all values that the iterate functions yield.
"""
return ValueSet.from_sets(
lazy_value.infer()
for lazy_value in values.iterate(contextualized_node, is_async=is_async)
)
class _ValueWrapperBase(HelperValueMixin):
@safe_property
def name(self):
from jedi.inference.names import ValueName
wrapped_name = self._wrapped_value.name
if wrapped_name.tree_name is not None:
return ValueName(self, wrapped_name.tree_name)
else:
from jedi.inference.compiled import CompiledValueName
return CompiledValueName(self, wrapped_name.string_name)
@classmethod
@inference_state_as_method_param_cache()
def create_cached(cls, inference_state, *args, **kwargs):
return cls(*args, **kwargs)
def __getattr__(self, name):
assert name != '_wrapped_value', 'Problem with _get_wrapped_value'
return getattr(self._wrapped_value, name)
class LazyValueWrapper(_ValueWrapperBase):
@safe_property
@memoize_method
def _wrapped_value(self):
with debug.increase_indent_cm('Resolve lazy value wrapper'):
return self._get_wrapped_value()
def __repr__(self):
return '<%s>' % (self.__class__.__name__)
def _get_wrapped_value(self):
raise NotImplementedError
class ValueWrapper(_ValueWrapperBase):
def __init__(self, wrapped_value):
self._wrapped_value = wrapped_value
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self._wrapped_value)
class TreeValue(Value):
def __init__(self, inference_state, parent_context, tree_node):
super().__init__(inference_state, parent_context)
self.tree_node = tree_node
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.tree_node)
class ContextualizedNode:
def __init__(self, context, node):
self.context = context
self.node = node
def get_root_context(self):
return self.context.get_root_context()
def infer(self):
return self.context.infer_node(self.node)
def __repr__(self):
return '<%s: %s in %s>' % (self.__class__.__name__, self.node, self.context)
def _getitem(value, index_values, contextualized_node):
# The actual getitem call.
result = NO_VALUES
unused_values = set()
for index_value in index_values:
index = index_value.get_safe_value(default=None)
if type(index) in (float, int, str, slice, bytes):
try:
result |= value.py__simple_getitem__(index)
continue
except SimpleGetItemNotFound:
pass
unused_values.add(index_value)
# The index was somehow not good enough or simply a wrong type.
# Therefore we now iterate through all the values and just take
# all results.
if unused_values or not index_values:
result |= value.py__getitem__(
ValueSet(unused_values),
contextualized_node
)
debug.dbg('py__getitem__ result: %s', result)
return result
class ValueSet:
def __init__(self, iterable):
self._set = frozenset(iterable)
for value in iterable:
assert not isinstance(value, ValueSet)
@classmethod
def _from_frozen_set(cls, frozenset_):
self = cls.__new__(cls)
self._set = frozenset_
return self
@classmethod
def from_sets(cls, sets):
"""
Used to work with an iterable of sets.
"""
aggregated = set()
for set_ in sets:
if isinstance(set_, ValueSet):
aggregated |= set_._set
else:
aggregated |= frozenset(set_)
return cls._from_frozen_set(frozenset(aggregated))
def __or__(self, other):
return self._from_frozen_set(self._set | other._set)
def __and__(self, other):
return self._from_frozen_set(self._set & other._set)
def __iter__(self):
return iter(self._set)
def __bool__(self):
return bool(self._set)
def __len__(self):
return len(self._set)
def __repr__(self):
return 'S{%s}' % (', '.join(str(s) for s in self._set))
def filter(self, filter_func):
return self.__class__(filter(filter_func, self._set))
def __getattr__(self, name):
def mapper(*args, **kwargs):
return self.from_sets(
getattr(value, name)(*args, **kwargs)
for value in self._set
)
return mapper
def __eq__(self, other):
return self._set == other._set
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self._set)
def py__class__(self):
return ValueSet(c.py__class__() for c in self._set)
def iterate(self, contextualized_node=None, is_async=False):
from jedi.inference.lazy_value import get_merged_lazy_value
type_iters = [c.iterate(contextualized_node, is_async=is_async) for c in self._set]
for lazy_values in zip_longest(*type_iters):
yield get_merged_lazy_value(
[l for l in lazy_values if l is not None]
)
def execute(self, arguments):
return ValueSet.from_sets(c.inference_state.execute(c, arguments) for c in self._set)
def execute_with_values(self, *args, **kwargs):
return ValueSet.from_sets(c.execute_with_values(*args, **kwargs) for c in self._set)
def goto(self, *args, **kwargs):
return reduce(add, [c.goto(*args, **kwargs) for c in self._set], [])
def py__getattribute__(self, *args, **kwargs):
return ValueSet.from_sets(c.py__getattribute__(*args, **kwargs) for c in self._set)
def get_item(self, *args, **kwargs):
return ValueSet.from_sets(_getitem(c, *args, **kwargs) for c in self._set)
def try_merge(self, function_name):
value_set = self.__class__([])
for c in self._set:
try:
method = getattr(c, function_name)
except AttributeError:
pass
else:
value_set |= method()
return value_set
def gather_annotation_classes(self):
return ValueSet.from_sets([c.gather_annotation_classes() for c in self._set])
def get_signatures(self):
return [sig for c in self._set for sig in c.get_signatures()]
def get_type_hint(self, add_class_info=True):
t = [v.get_type_hint(add_class_info=add_class_info) for v in self._set]
type_hints = sorted(filter(None, t))
if len(type_hints) == 1:
return type_hints[0]
optional = 'None' in type_hints
if optional:
type_hints.remove('None')
if len(type_hints) == 0:
return None
elif len(type_hints) == 1:
s = type_hints[0]
else:
s = 'Union[%s]' % ', '.join(type_hints)
if optional:
s = 'Optional[%s]' % s
return s
def infer_type_vars(self, value_set):
# Circular
from jedi.inference.gradual.annotation import merge_type_var_dicts
type_var_dict = {}
for value in self._set:
merge_type_var_dicts(
type_var_dict,
value.infer_type_vars(value_set),
)
return type_var_dict
NO_VALUES = ValueSet([])
def iterator_to_value_set(func):
def wrapper(*args, **kwargs):
return ValueSet(func(*args, **kwargs))
return wrapper
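# Illustrative usage (not part of jedi): ValueSet is essentially a small
# frozenset wrapper with set-like operators. Plain strings stand in for real
# Value instances here, purely for this sketch.
_sketch_a = ValueSet(['int'])
_sketch_b = ValueSet(['int', 'str'])
assert len(_sketch_a | _sketch_b) == 2
assert (_sketch_a & _sketch_b) == ValueSet(['int'])
assert not NO_VALUES  # the empty set is falsy, which jedi uses as a shortcut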

View File

@ -0,0 +1,126 @@
"""
- the popular ``_memoize_default`` works like a typical memoize, but returns a
default value when recursion would otherwise occur.
- ``CachedMetaClass`` uses ``_memoize_default`` to do the same with classes.
"""
from functools import wraps
from jedi import debug
_NO_DEFAULT = object()
_RECURSION_SENTINEL = object()
def _memoize_default(default=_NO_DEFAULT, inference_state_is_first_arg=False,
second_arg_is_inference_state=False):
""" This is a typical memoization decorator, BUT there is one difference:
To prevent recursion it sets defaults.
Preventing recursion is in this case a much bigger win than speed. I
don't think that there is a big speed difference, but there are many cases
where recursion could happen (think about a = b; b = a).
"""
def func(function):
def wrapper(obj, *args, **kwargs):
# TODO These checks are kind of ugly and slow.
if inference_state_is_first_arg:
cache = obj.memoize_cache
elif second_arg_is_inference_state:
cache = args[0].memoize_cache # needed for meta classes
else:
cache = obj.inference_state.memoize_cache
try:
memo = cache[function]
except KeyError:
cache[function] = memo = {}
key = (obj, args, frozenset(kwargs.items()))
if key in memo:
return memo[key]
else:
if default is not _NO_DEFAULT:
memo[key] = default
rv = function(obj, *args, **kwargs)
memo[key] = rv
return rv
return wrapper
return func
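# Illustrative sketch (not part of jedi): the recursion protection described
# in the docstring of ``_memoize_default`` above, reduced to a standalone
# decorator. The default is written into the cache *before* the call, so a
# recursive re-entry with the same key gets the default back instead of
# recursing forever. The `_sketch_*` name is hypothetical.
def _sketch_memoize_default(default):
    def decorator(func):
        cache = {}
        def wrapper(*args):
            if args in cache:
                return cache[args]
            cache[args] = default          # pre-seed to break recursion
            cache[args] = func(*args)      # overwrite with the real result
            return cache[args]
        return wrapper
    return decorator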
def inference_state_function_cache(default=_NO_DEFAULT):
def decorator(func):
return _memoize_default(default=default, inference_state_is_first_arg=True)(func)
return decorator
def inference_state_method_cache(default=_NO_DEFAULT):
def decorator(func):
return _memoize_default(default=default)(func)
return decorator
def inference_state_as_method_param_cache():
def decorator(call):
return _memoize_default(second_arg_is_inference_state=True)(call)
return decorator
class CachedMetaClass(type):
"""
This is basically almost the same as the decorator above, it just caches
class initializations. Either you do it this way or with decorators, but
with decorators you lose class access (isinstance, etc).
"""
@inference_state_as_method_param_cache()
def __call__(self, *args, **kwargs):
return super().__call__(*args, **kwargs)
def inference_state_method_generator_cache():
"""
This is a special memoizer. It memoizes generators and also checks for
recursion errors and returns no further iterator elements in that case.
"""
def func(function):
@wraps(function)
def wrapper(obj, *args, **kwargs):
cache = obj.inference_state.memoize_cache
try:
memo = cache[function]
except KeyError:
cache[function] = memo = {}
key = (obj, args, frozenset(kwargs.items()))
if key in memo:
actual_generator, cached_lst = memo[key]
else:
actual_generator = function(obj, *args, **kwargs)
cached_lst = []
memo[key] = actual_generator, cached_lst
i = 0
while True:
try:
next_element = cached_lst[i]
if next_element is _RECURSION_SENTINEL:
debug.warning('Found a generator recursion for %s' % obj)
# This means we have hit a recursion.
return
except IndexError:
cached_lst.append(_RECURSION_SENTINEL)
next_element = next(actual_generator, None)
if next_element is None:
cached_lst.pop()
return
cached_lst[-1] = next_element
yield next_element
i += 1
return wrapper
return func
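# Illustrative sketch (not part of jedi): the caching idea used in
# ``inference_state_method_generator_cache`` above, reduced to a helper that
# replays elements already produced into `cached` and uses a sentinel to stop
# recursive re-entry. All `_sketch_*` / `_SKETCH_*` names are hypothetical.
_SKETCH_RECURSION = object()

def _sketch_cached_generator(memo, key, make_generator):
    if key not in memo:
        memo[key] = (make_generator(), [])
    generator, cached = memo[key]
    i = 0
    while True:
        if i < len(cached):
            if cached[i] is _SKETCH_RECURSION:
                return                        # re-entered ourselves: stop here
            yield cached[i]
        else:
            cached.append(_SKETCH_RECURSION)  # mark "currently being produced"
            element = next(generator, None)
            if element is None:
                cached.pop()
                return
            cached[i] = element
            yield element
        i += 1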

View File

@ -0,0 +1,70 @@
# This file also re-exports symbols for wider use. We configure mypy and flake8
# to be aware that this file does this.
from jedi.inference.compiled.value import CompiledValue, CompiledName, \
CompiledValueFilter, CompiledValueName, create_from_access_path
from jedi.inference.base_value import LazyValueWrapper
def builtin_from_name(inference_state, string):
typing_builtins_module = inference_state.builtins_module
if string in ('None', 'True', 'False'):
builtins, = typing_builtins_module.non_stub_value_set
filter_ = next(builtins.get_filters())
else:
filter_ = next(typing_builtins_module.get_filters())
name, = filter_.get(string)
value, = name.infer()
return value
class ExactValue(LazyValueWrapper):
"""
This class represents exact values, which makes operations like additions
and exact boolean values possible, while still being a "normal" stub.
"""
def __init__(self, compiled_value):
self.inference_state = compiled_value.inference_state
self._compiled_value = compiled_value
def __getattribute__(self, name):
if name in ('get_safe_value', 'execute_operation', 'access_handle',
'negate', 'py__bool__', 'is_compiled'):
return getattr(self._compiled_value, name)
return super().__getattribute__(name)
def _get_wrapped_value(self):
instance, = builtin_from_name(
self.inference_state, self._compiled_value.name.string_name).execute_with_values()
return instance
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._compiled_value)
def create_simple_object(inference_state, obj):
"""
Only allows creations of objects that are easily picklable across Python
versions.
"""
assert type(obj) in (int, float, str, bytes, slice, complex, bool), repr(obj)
compiled_value = create_from_access_path(
inference_state,
inference_state.compiled_subprocess.create_simple_object(obj)
)
return ExactValue(compiled_value)
def get_string_value_set(inference_state):
return builtin_from_name(inference_state, 'str').execute_with_values()
def load_module(inference_state, dotted_name, **kwargs):
# Temporary workaround: some tensorflow builtins cannot be loaded, so loading
# is retried again and again, which is really slow.
if dotted_name.startswith('tensorflow.'):
return None
access_path = inference_state.compiled_subprocess.load_module(dotted_name=dotted_name, **kwargs)
if access_path is None:
return None
return create_from_access_path(inference_state, access_path)

View File

@ -0,0 +1,558 @@
import inspect
import types
import traceback
import sys
import operator as op
from collections import namedtuple
import warnings
import re
import builtins
import typing
from pathlib import Path
from typing import Optional
from jedi.inference.compiled.getattr_static import getattr_static
ALLOWED_GETITEM_TYPES = (str, list, tuple, bytes, bytearray, dict)
MethodDescriptorType = type(str.replace)
# These are not considered classes and access is granted even though they have
# a __class__ attribute.
NOT_CLASS_TYPES = (
types.BuiltinFunctionType,
types.CodeType,
types.FrameType,
types.FunctionType,
types.GeneratorType,
types.GetSetDescriptorType,
types.LambdaType,
types.MemberDescriptorType,
types.MethodType,
types.ModuleType,
types.TracebackType,
MethodDescriptorType,
types.MappingProxyType,
types.SimpleNamespace,
types.DynamicClassAttribute,
)
# Those types don't exist in typing.
MethodDescriptorType = type(str.replace)
WrapperDescriptorType = type(set.__iter__)
# `object.__subclasshook__` is an already executed descriptor.
object_class_dict = type.__dict__["__dict__"].__get__(object)
ClassMethodDescriptorType = type(object_class_dict['__subclasshook__'])
_sentinel = object()
# Maps Python syntax to the operator module.
COMPARISON_OPERATORS = {
'==': op.eq,
'!=': op.ne,
'is': op.is_,
'is not': op.is_not,
'<': op.lt,
'<=': op.le,
'>': op.gt,
'>=': op.ge,
}
_OPERATORS = {
'+': op.add,
'-': op.sub,
}
_OPERATORS.update(COMPARISON_OPERATORS)
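# Illustrative usage (not part of jedi): the table maps source-level operator
# strings to the corresponding callables from the operator module, which is
# how execute_operation() below evaluates e.g. comparisons between objects.
assert _OPERATORS['<='](3, 5)
assert _OPERATORS['+']('foo', 'bar') == 'foobar'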
ALLOWED_DESCRIPTOR_ACCESS = (
types.FunctionType,
types.GetSetDescriptorType,
types.MemberDescriptorType,
MethodDescriptorType,
WrapperDescriptorType,
ClassMethodDescriptorType,
staticmethod,
classmethod,
)
def safe_getattr(obj, name, default=_sentinel):
try:
attr, is_get_descriptor = getattr_static(obj, name)
except AttributeError:
if default is _sentinel:
raise
return default
else:
if isinstance(attr, ALLOWED_DESCRIPTOR_ACCESS):
# In case of descriptors that have get methods we cannot return
# its value, because that would mean code execution.
# Since it's an isinstance call, code execution is still possible,
# so this is not really a security feature, but much more of a
# safety feature. Code execution is basically always possible when
# a module is imported. This is here so people don't shoot
# themselves in the foot.
return getattr(obj, name)
return attr
SignatureParam = namedtuple(
'SignatureParam',
'name has_default default default_string has_annotation annotation annotation_string kind_name'
)
def shorten_repr(func):
def wrapper(self):
r = func(self)
if len(r) > 50:
r = r[:50] + '..'
return r
return wrapper
def create_access(inference_state, obj):
return inference_state.compiled_subprocess.get_or_create_access_handle(obj)
def load_module(inference_state, dotted_name, sys_path):
temp, sys.path = sys.path, sys_path
try:
__import__(dotted_name)
except ImportError:
# If a module is "corrupt" or not really a Python module or whatever.
warnings.warn(
"Module %s not importable in path %s." % (dotted_name, sys_path),
UserWarning,
stacklevel=2,
)
return None
except Exception:
# Since __import__ pretty much makes code execution possible, just
# catch any error here and print it.
warnings.warn(
"Cannot import:\n%s" % traceback.format_exc(), UserWarning, stacklevel=2
)
return None
finally:
sys.path = temp
# Just access the cache after import, because of #59 as well as the very
# complicated import structure of Python.
module = sys.modules[dotted_name]
return create_access_path(inference_state, module)
class AccessPath:
def __init__(self, accesses):
self.accesses = accesses
def create_access_path(inference_state, obj):
access = create_access(inference_state, obj)
return AccessPath(access.get_access_path_tuples())
def get_api_type(obj):
if inspect.isclass(obj):
return 'class'
elif inspect.ismodule(obj):
return 'module'
elif inspect.isbuiltin(obj) or inspect.ismethod(obj) \
or inspect.ismethoddescriptor(obj) or inspect.isfunction(obj):
return 'function'
# Everything else...
return 'instance'
class DirectObjectAccess:
def __init__(self, inference_state, obj):
self._inference_state = inference_state
self._obj = obj
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self.get_repr())
def _create_access(self, obj):
return create_access(self._inference_state, obj)
def _create_access_path(self, obj):
return create_access_path(self._inference_state, obj)
def py__bool__(self):
return bool(self._obj)
def py__file__(self) -> Optional[Path]:
try:
return Path(self._obj.__file__)
except AttributeError:
return None
def py__doc__(self):
return inspect.getdoc(self._obj) or ''
def py__name__(self):
if not _is_class_instance(self._obj) or \
inspect.ismethoddescriptor(self._obj): # slots
cls = self._obj
else:
try:
cls = self._obj.__class__
except AttributeError:
# happens with numpy.core.umath._UFUNC_API (you get it
# automatically by doing `import numpy`).
return None
try:
return cls.__name__
except AttributeError:
return None
def py__mro__accesses(self):
return tuple(self._create_access_path(cls) for cls in self._obj.__mro__[1:])
def py__getitem__all_values(self):
if isinstance(self._obj, dict):
return [self._create_access_path(v) for v in self._obj.values()]
if isinstance(self._obj, (list, tuple)):
return [self._create_access_path(v) for v in self._obj]
if self.is_instance():
cls = DirectObjectAccess(self._inference_state, self._obj.__class__)
return cls.py__getitem__all_values()
try:
getitem = self._obj.__getitem__
except AttributeError:
pass
else:
annotation = DirectObjectAccess(self._inference_state, getitem).get_return_annotation()
if annotation is not None:
return [annotation]
return None
def py__simple_getitem__(self, index):
if type(self._obj) not in ALLOWED_GETITEM_TYPES:
# Get rid of side effects, we won't call custom `__getitem__`s.
return None
return self._create_access_path(self._obj[index])
def py__iter__list(self):
try:
iter_method = self._obj.__iter__
except AttributeError:
return None
else:
p = DirectObjectAccess(self._inference_state, iter_method).get_return_annotation()
if p is not None:
return [p]
if type(self._obj) not in ALLOWED_GETITEM_TYPES:
# Get rid of side effects, we won't call custom `__getitem__`s.
return []
lst = []
for i, part in enumerate(self._obj):
if i > 20:
# Should not go crazy with large iterators
break
lst.append(self._create_access_path(part))
return lst
def py__class__(self):
return self._create_access_path(self._obj.__class__)
def py__bases__(self):
return [self._create_access_path(base) for base in self._obj.__bases__]
def py__path__(self):
paths = getattr(self._obj, '__path__', None)
# Avoid some weird hacks that would just fail, because they cannot be
# used by pickle.
if not isinstance(paths, list) \
or not all(isinstance(p, str) for p in paths):
return None
return paths
@shorten_repr
def get_repr(self):
if inspect.ismodule(self._obj):
return repr(self._obj)
# Try to avoid execution of the property.
if safe_getattr(self._obj, '__module__', default='') == 'builtins':
return repr(self._obj)
type_ = type(self._obj)
if type_ == type:
return type.__repr__(self._obj)
if safe_getattr(type_, '__module__', default='') == 'builtins':
# Allow direct execution of repr for builtins.
return repr(self._obj)
return object.__repr__(self._obj)
def is_class(self):
return inspect.isclass(self._obj)
def is_function(self):
return inspect.isfunction(self._obj) or inspect.ismethod(self._obj)
def is_module(self):
return inspect.ismodule(self._obj)
def is_instance(self):
return _is_class_instance(self._obj)
def ismethoddescriptor(self):
return inspect.ismethoddescriptor(self._obj)
def get_qualified_names(self):
def try_to_get_name(obj):
return getattr(obj, '__qualname__', getattr(obj, '__name__', None))
if self.is_module():
return ()
name = try_to_get_name(self._obj)
if name is None:
name = try_to_get_name(type(self._obj))
if name is None:
return ()
return tuple(name.split('.'))
def dir(self):
return dir(self._obj)
def has_iter(self):
try:
iter(self._obj)
return True
except TypeError:
return False
def is_allowed_getattr(self, name, safe=True):
# TODO this API is ugly.
if not safe:
# Unsafe is mostly used to check for __getattr__/__getattribute__.
# getattr_static works for properties, but the underscore methods
# are just ignored (because it's safer and avoids more code
# execution). See also GH #1378.
# Avoid warnings, see comment in the next function.
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
try:
return hasattr(self._obj, name), False
except Exception:
# Obviously has an attribute (probably a property) that
# gets executed, so just avoid all exceptions here.
return False, False
try:
attr, is_get_descriptor = getattr_static(self._obj, name)
except AttributeError:
return False, False
else:
if is_get_descriptor and type(attr) not in ALLOWED_DESCRIPTOR_ACCESS:
# In case of descriptors that have get methods we cannot return
# its value, because that would mean code execution.
return True, True
return True, False
def getattr_paths(self, name, default=_sentinel):
try:
# Make sure no warnings are printed here, this is autocompletion,
# warnings should not be shown. See also GH #1383.
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
return_obj = getattr(self._obj, name)
except Exception as e:
if default is _sentinel:
if isinstance(e, AttributeError):
# Happens e.g. in properties of
# PyQt4.QtGui.QStyleOptionComboBox.currentText
# -> just set it to None
raise
# Just in case anything happens, raise an AttributeError. It
# should not crash.
raise AttributeError
return_obj = default
access = self._create_access(return_obj)
if inspect.ismodule(return_obj):
return [access]
try:
module = return_obj.__module__
except AttributeError:
pass
else:
if module is not None and isinstance(module, str):
try:
__import__(module)
# For some modules like _sqlite3, the __module__ for classes is
# different, in this case it's sqlite3. So we have to try to
# load that "original" module, because it's not loaded yet. If
# we don't do that, we don't really have a "parent" module and
# we would fall back to builtins.
except ImportError:
pass
module = inspect.getmodule(return_obj)
if module is None:
module = inspect.getmodule(type(return_obj))
if module is None:
module = builtins
return [self._create_access(module), access]
def get_safe_value(self):
if type(self._obj) in (bool, bytes, float, int, str, slice) or self._obj is None:
return self._obj
raise ValueError("Object is type %s and not simple" % type(self._obj))
def get_api_type(self):
return get_api_type(self._obj)
def get_array_type(self):
if isinstance(self._obj, dict):
return 'dict'
return None
def get_key_paths(self):
def iter_partial_keys():
# We could use list(keys()), but that might take a lot more memory.
for (i, k) in enumerate(self._obj.keys()):
# Limit key listing at some point. This is artificial, but this
# way we don't get stalled because of slow completions
if i > 50:
break
yield k
return [self._create_access_path(k) for k in iter_partial_keys()]
def get_access_path_tuples(self):
accesses = [create_access(self._inference_state, o) for o in self._get_objects_path()]
return [(access.py__name__(), access) for access in accesses]
def _get_objects_path(self):
def get():
obj = self._obj
yield obj
try:
obj = obj.__objclass__
except AttributeError:
pass
else:
yield obj
try:
# Returns a dotted string path.
imp_plz = obj.__module__
except AttributeError:
# Unfortunately in some cases like `int` there's no __module__
if not inspect.ismodule(obj):
yield builtins
else:
if imp_plz is None:
# Happens for example in `(_ for _ in []).send.__module__`.
yield builtins
else:
try:
yield sys.modules[imp_plz]
except KeyError:
# __module__ can be something arbitrary that doesn't exist.
yield builtins
return list(reversed(list(get())))
def execute_operation(self, other_access_handle, operator):
other_access = other_access_handle.access
op = _OPERATORS[operator]
return self._create_access_path(op(self._obj, other_access._obj))
def get_annotation_name_and_args(self):
"""
Returns Tuple[Optional[str], Tuple[AccessPath, ...]]
"""
name = None
args = ()
if safe_getattr(self._obj, '__module__', default='') == 'typing':
m = re.match(r'typing.(\w+)\[', repr(self._obj))
if m is not None:
name = m.group(1)
import typing
if sys.version_info >= (3, 8):
args = typing.get_args(self._obj)
else:
args = safe_getattr(self._obj, '__args__', default=None)
return name, tuple(self._create_access_path(arg) for arg in args)
def needs_type_completions(self):
return inspect.isclass(self._obj) and self._obj != type
def _annotation_to_str(self, annotation):
return inspect.formatannotation(annotation)
def get_signature_params(self):
return [
SignatureParam(
name=p.name,
has_default=p.default is not p.empty,
default=self._create_access_path(p.default),
default_string=repr(p.default),
has_annotation=p.annotation is not p.empty,
annotation=self._create_access_path(p.annotation),
annotation_string=self._annotation_to_str(p.annotation),
kind_name=str(p.kind)
) for p in self._get_signature().parameters.values()
]
def _get_signature(self):
obj = self._obj
try:
return inspect.signature(obj)
except (RuntimeError, TypeError):
# Reading the code of the function in Python 3.6 implies there are
# at least these errors that might occur if something is wrong with
# the signature. In that case we just want a simple escape for now.
raise ValueError
def get_return_annotation(self):
try:
o = self._obj.__annotations__.get('return')
except AttributeError:
return None
if o is None:
return None
try:
o = typing.get_type_hints(self._obj).get('return')
except Exception:
pass
return self._create_access_path(o)
def negate(self):
return self._create_access_path(-self._obj)
def get_dir_infos(self):
"""
Used to return a couple of infos that are needed when accessing the sub
objects of an objects
"""
tuples = dict(
(name, self.is_allowed_getattr(name))
for name in self.dir()
)
return self.needs_type_completions(), tuples
def _is_class_instance(obj):
"""Like inspect.* methods."""
try:
cls = obj.__class__
except AttributeError:
return False
else:
# The isinstance check for cls is just there so issubclass doesn't
# raise an exception.
return cls != type and isinstance(cls, type) and not issubclass(cls, NOT_CLASS_TYPES)

View File

@ -0,0 +1,121 @@
"""
A static version of getattr.
This is a backport of the Python 3 code with a little bit of additional
information returned to enable Jedi to make decisions.
"""
import types
from jedi import debug
_sentinel = object()
def _check_instance(obj, attr):
instance_dict = {}
try:
instance_dict = object.__getattribute__(obj, "__dict__")
except AttributeError:
pass
return dict.get(instance_dict, attr, _sentinel)
def _check_class(klass, attr):
for entry in _static_getmro(klass):
if _shadowed_dict(type(entry)) is _sentinel:
try:
return entry.__dict__[attr]
except KeyError:
pass
return _sentinel
def _is_type(obj):
try:
_static_getmro(obj)
except TypeError:
return False
return True
def _shadowed_dict(klass):
dict_attr = type.__dict__["__dict__"]
for entry in _static_getmro(klass):
try:
class_dict = dict_attr.__get__(entry)["__dict__"]
except KeyError:
pass
else:
if not (type(class_dict) is types.GetSetDescriptorType
and class_dict.__name__ == "__dict__"
and class_dict.__objclass__ is entry):
return class_dict
return _sentinel
def _static_getmro(klass):
mro = type.__dict__['__mro__'].__get__(klass)
if not isinstance(mro, (tuple, list)):
# There are unfortunately no tests for this; I was not able to
# reproduce this in pure Python. However, this should still solve the issue
# raised in GH #1517.
debug.warning('mro of %s returned %s, should be a tuple' % (klass, mro))
return ()
return mro
def _safe_hasattr(obj, name):
return _check_class(type(obj), name) is not _sentinel
def _safe_is_data_descriptor(obj):
return _safe_hasattr(obj, '__set__') or _safe_hasattr(obj, '__delete__')
def getattr_static(obj, attr, default=_sentinel):
"""Retrieve attributes without triggering dynamic lookup via the
descriptor protocol, __getattr__ or __getattribute__.
Note: this function may not be able to retrieve all attributes
that getattr can fetch (like dynamically created attributes)
and may find attributes that getattr can't (like descriptors
that raise AttributeError). It can also return descriptor objects
instead of instance members in some cases. See the
documentation for details.
Returns a tuple `(attr, is_get_descriptor)`. is_get_descriptor means that
the attribute is a descriptor that has a `__get__` attribute.
"""
instance_result = _sentinel
if not _is_type(obj):
klass = type(obj)
dict_attr = _shadowed_dict(klass)
if (dict_attr is _sentinel or type(dict_attr) is types.MemberDescriptorType):
instance_result = _check_instance(obj, attr)
else:
klass = obj
klass_result = _check_class(klass, attr)
if instance_result is not _sentinel and klass_result is not _sentinel:
if _safe_hasattr(klass_result, '__get__') \
and _safe_is_data_descriptor(klass_result):
# A get/set descriptor has priority over everything.
return klass_result, True
if instance_result is not _sentinel:
return instance_result, False
if klass_result is not _sentinel:
return klass_result, _safe_hasattr(klass_result, '__get__')
if obj is klass:
# for types we check the metaclass too
for entry in _static_getmro(type(klass)):
if _shadowed_dict(type(entry)) is _sentinel:
try:
return entry.__dict__[attr], False
except KeyError:
pass
if default is not _sentinel:
return default, False
raise AttributeError(attr)
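# Illustrative usage (not part of jedi): a property is returned as the raw
# descriptor instead of being executed, together with is_get_descriptor=True.
# The `_Sketch*` / `_sketch_*` names below are hypothetical.
class _SketchWithProperty:
    @property
    def attr(self):
        raise RuntimeError('would only run on a normal getattr() call')

_sketch_attr, _sketch_is_get = getattr_static(_SketchWithProperty(), 'attr')
assert isinstance(_sketch_attr, property) and _sketch_is_get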

View File

@ -0,0 +1,309 @@
"""
Used only for REPL Completion.
"""
import inspect
from pathlib import Path
from jedi.parser_utils import get_cached_code_lines
from jedi import settings
from jedi.cache import memoize_method
from jedi.inference import compiled
from jedi.file_io import FileIO
from jedi.inference.names import NameWrapper
from jedi.inference.base_value import ValueSet, ValueWrapper, NO_VALUES
from jedi.inference.value import ModuleValue
from jedi.inference.cache import inference_state_function_cache, \
inference_state_method_cache
from jedi.inference.compiled.access import ALLOWED_GETITEM_TYPES, get_api_type
from jedi.inference.gradual.conversion import to_stub
from jedi.inference.context import CompiledContext, CompiledModuleContext, \
TreeContextMixin
_sentinel = object()
class MixedObject(ValueWrapper):
"""
A ``MixedObject`` is used in two ways:
1. It uses the default logic of ``parser.python.tree`` objects,
2. except for getattr calls and signatures. The names dicts are generated
in a fashion like ``CompiledValue``.
This combined logic makes it possible to provide more powerful REPL
completion. It allows side effects that are not noticeable with the default
parser structure to still be completable.
The biggest difference between CompiledValue and MixedObject is that we are
generally dealing with Python code and not with C code. This will generate
fewer special cases, because in Python you don't have the same freedoms
to modify the runtime.
"""
def __init__(self, compiled_value, tree_value):
super().__init__(tree_value)
self.compiled_value = compiled_value
self.access_handle = compiled_value.access_handle
def get_filters(self, *args, **kwargs):
yield MixedObjectFilter(
self.inference_state, self.compiled_value, self._wrapped_value)
def get_signatures(self):
# Prefer `inspect.signature` over somehow analyzing Python code. It
# should be very precise, especially for stuff like `partial`.
return self.compiled_value.get_signatures()
@inference_state_method_cache(default=NO_VALUES)
def py__call__(self, arguments):
# Fallback to the wrapped value if to stub returns no values.
values = to_stub(self._wrapped_value)
if not values:
values = self._wrapped_value
return values.py__call__(arguments)
def get_safe_value(self, default=_sentinel):
if default is _sentinel:
return self.compiled_value.get_safe_value()
else:
return self.compiled_value.get_safe_value(default)
@property
def array_type(self):
return self.compiled_value.array_type
def get_key_values(self):
return self.compiled_value.get_key_values()
def py__simple_getitem__(self, index):
python_object = self.compiled_value.access_handle.access._obj
if type(python_object) in ALLOWED_GETITEM_TYPES:
return self.compiled_value.py__simple_getitem__(index)
return self._wrapped_value.py__simple_getitem__(index)
def negate(self):
return self.compiled_value.negate()
def _as_context(self):
if self.parent_context is None:
return MixedModuleContext(self)
return MixedContext(self)
def __repr__(self):
return '<%s: %s; %s>' % (
type(self).__name__,
self.access_handle.get_repr(),
self._wrapped_value,
)
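# Illustrative usage (not part of jedi's own code, kept commented out): the
# REPL-style completion that MixedObject enables can be observed through the
# public jedi.Interpreter API. The names below are hypothetical.
#
#   import jedi
#   dynamic_obj = type('Dyn', (), {})()
#   dynamic_obj.added_at_runtime = 42
#   completions = jedi.Interpreter('dynamic_obj.', [locals()]).complete()
#   # includes a completion for 'added_at_runtime', which purely static
#   # analysis of the source string could not know about.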
class MixedContext(CompiledContext, TreeContextMixin):
@property
def compiled_value(self):
return self._value.compiled_value
class MixedModuleContext(CompiledModuleContext, MixedContext):
pass
class MixedName(NameWrapper):
"""
The ``CompiledName._compiled_value`` is our MixedObject.
"""
def __init__(self, wrapped_name, parent_tree_value):
super().__init__(wrapped_name)
self._parent_tree_value = parent_tree_value
@property
def start_pos(self):
values = list(self.infer())
if not values:
# This means a start_pos that doesn't exist (compiled objects).
return 0, 0
return values[0].name.start_pos
@memoize_method
def infer(self):
compiled_value = self._wrapped_name.infer_compiled_value()
tree_value = self._parent_tree_value
if tree_value.is_instance() or tree_value.is_class():
tree_values = tree_value.py__getattribute__(self.string_name)
if compiled_value.is_function():
return ValueSet({MixedObject(compiled_value, v) for v in tree_values})
module_context = tree_value.get_root_context()
return _create(self._inference_state, compiled_value, module_context)
class MixedObjectFilter(compiled.CompiledValueFilter):
def __init__(self, inference_state, compiled_value, tree_value):
super().__init__(inference_state, compiled_value)
self._tree_value = tree_value
def _create_name(self, name):
return MixedName(
super()._create_name(name),
self._tree_value,
)
@inference_state_function_cache()
def _load_module(inference_state, path):
return inference_state.parse(
path=path,
cache=True,
diff_cache=settings.fast_parser,
cache_path=settings.cache_directory
).get_root_node()
def _get_object_to_check(python_object):
"""Check if inspect.getfile has a chance to find the source."""
try:
python_object = inspect.unwrap(python_object)
except ValueError:
# Can return a ValueError when it wraps around
pass
if (inspect.ismodule(python_object)
or inspect.isclass(python_object)
or inspect.ismethod(python_object)
or inspect.isfunction(python_object)
or inspect.istraceback(python_object)
or inspect.isframe(python_object)
or inspect.iscode(python_object)):
return python_object
try:
return python_object.__class__
except AttributeError:
raise TypeError # Prevents computation of `repr` within inspect.
def _find_syntax_node_name(inference_state, python_object):
original_object = python_object
try:
python_object = _get_object_to_check(python_object)
path = inspect.getsourcefile(python_object)
except (OSError, TypeError):
# The type might not be known (e.g. class_with_dict.__weakref__)
return None
path = None if path is None else Path(path)
try:
if path is None or not path.exists():
# The path might not exist or be e.g. <stdin>.
return None
except OSError:
# Might raise an OSError on Windows:
#
# [WinError 123] The filename, directory name, or volume label
# syntax is incorrect: '<string>'
return None
file_io = FileIO(path)
module_node = _load_module(inference_state, path)
if inspect.ismodule(python_object):
# We don't need to check names for modules, because there's not really
# a way to write a module in a module in Python (and also __name__ can
# be something like ``email.utils``).
code_lines = get_cached_code_lines(inference_state.grammar, path)
return module_node, module_node, file_io, code_lines
try:
name_str = python_object.__name__
except AttributeError:
# Stuff like python_function.__code__.
return None
if name_str == '<lambda>':
return None # It's too hard to find lambdas.
# Doesn't always work (e.g. os.stat_result)
names = module_node.get_used_names().get(name_str, [])
# Only functions and classes are relevant. If a name e.g. points to an
# import, it's probably a builtin (like collections.deque) and needs to be
# ignored.
names = [
n for n in names
if n.parent.type in ('funcdef', 'classdef') and n.parent.name == n
]
if not names:
return None
try:
code = python_object.__code__
# By using the line number of a code object we make the lookup in a
# file pretty easy. There's still a possibility of people defining
# stuff like ``a = 3; foo(a); a = 4`` on the same line, but if people
# do so we just don't care.
line_nr = code.co_firstlineno
except AttributeError:
pass
else:
line_names = [name for name in names if name.start_pos[0] == line_nr]
# There's a chance that the object is not available anymore, because
# the code has changed in the background.
if line_names:
names = line_names
code_lines = get_cached_code_lines(inference_state.grammar, path)
# It's really hard to actually get the right definition; as a last
# resort we just return the last one. This might lead to odd
# completions at some points but will lead to mostly correct type
# inference, because people tend to define a public name in a module only
# once.
tree_node = names[-1].parent
if tree_node.type == 'funcdef' and get_api_type(original_object) == 'instance':
# If an instance is given and we're landing on a function (e.g.
# partial in 3.5), something is completely wrong and we should not
# return that.
return None
return module_node, tree_node, file_io, code_lines
@inference_state_function_cache()
def _create(inference_state, compiled_value, module_context):
# TODO accessing this is bad, but it probably doesn't matter that much,
# because we're working with interpreters only here.
python_object = compiled_value.access_handle.access._obj
result = _find_syntax_node_name(inference_state, python_object)
if result is None:
# TODO Care about generics from stuff like `[1]` and don't return like this.
if type(python_object) in (dict, list, tuple):
return ValueSet({compiled_value})
tree_values = to_stub(compiled_value)
if not tree_values:
return ValueSet({compiled_value})
else:
module_node, tree_node, file_io, code_lines = result
if module_context is None or module_context.tree_node != module_node:
root_compiled_value = compiled_value.get_root_context().get_value()
# TODO this __name__ might be wrong.
name = root_compiled_value.py__name__()
string_names = tuple(name.split('.'))
module_value = ModuleValue(
inference_state, module_node,
file_io=file_io,
string_names=string_names,
code_lines=code_lines,
is_package=root_compiled_value.is_package(),
)
if name is not None:
inference_state.module_cache.add(string_names, ValueSet([module_value]))
module_context = module_value.as_context()
tree_values = ValueSet({module_context.create_value(tree_node)})
if tree_node.type == 'classdef':
if not compiled_value.is_class():
# Is an instance, not a class.
tree_values = tree_values.execute_with_values()
return ValueSet(
MixedObject(compiled_value, tree_value=tree_value)
for tree_value in tree_values
)

View File

@ -0,0 +1,391 @@
"""
Makes it possible to do the compiled analysis in a subprocess. This has two
goals:
1. Making it safer - Segfaults and RuntimeErrors as well as stdout/stderr can
be ignored and dealt with.
2. Making it possible to handle different Python versions as well as virtualenvs.
"""
import collections
import os
import sys
import queue
import subprocess
import traceback
import weakref
from functools import partial
from threading import Thread
from jedi._compatibility import pickle_dump, pickle_load
from jedi import debug
from jedi.cache import memoize_method
from jedi.inference.compiled.subprocess import functions
from jedi.inference.compiled.access import DirectObjectAccess, AccessPath, \
SignatureParam
from jedi.api.exceptions import InternalError
_MAIN_PATH = os.path.join(os.path.dirname(__file__), '__main__.py')
PICKLE_PROTOCOL = 4
def _GeneralizedPopen(*args, **kwargs):
if os.name == 'nt':
try:
# Was introduced in Python 3.7.
CREATE_NO_WINDOW = subprocess.CREATE_NO_WINDOW
except AttributeError:
CREATE_NO_WINDOW = 0x08000000
kwargs['creationflags'] = CREATE_NO_WINDOW
# The child process doesn't need file descriptors except 0, 1, 2.
# This is unix only.
kwargs['close_fds'] = 'posix' in sys.builtin_module_names
return subprocess.Popen(*args, **kwargs)
def _enqueue_output(out, queue_):
for line in iter(out.readline, b''):
queue_.put(line)
def _add_stderr_to_debug(stderr_queue):
while True:
# Try to do some error reporting from the subprocess and print its
# stderr contents.
try:
line = stderr_queue.get_nowait()
line = line.decode('utf-8', 'replace')
debug.warning('stderr output: %s' % line.rstrip('\n'))
except queue.Empty:
break
def _get_function(name):
return getattr(functions, name)
def _cleanup_process(process, thread):
try:
process.kill()
process.wait()
except OSError:
# Raised if the process is already killed.
pass
thread.join()
for stream in [process.stdin, process.stdout, process.stderr]:
try:
stream.close()
except OSError:
# Raised if the stream is broken.
pass
class _InferenceStateProcess:
def __init__(self, inference_state):
self._inference_state_weakref = weakref.ref(inference_state)
self._inference_state_id = id(inference_state)
self._handles = {}
def get_or_create_access_handle(self, obj):
id_ = id(obj)
try:
return self.get_access_handle(id_)
except KeyError:
access = DirectObjectAccess(self._inference_state_weakref(), obj)
handle = AccessHandle(self, access, id_)
self.set_access_handle(handle)
return handle
def get_access_handle(self, id_):
return self._handles[id_]
def set_access_handle(self, handle):
self._handles[handle.id] = handle
class InferenceStateSameProcess(_InferenceStateProcess):
"""
Basically just easy access to functions.py. It has the same API
as InferenceStateSubprocess and does the same thing without using a subprocess.
This is necessary for the Interpreter process.
"""
def __getattr__(self, name):
return partial(_get_function(name), self._inference_state_weakref())
class InferenceStateSubprocess(_InferenceStateProcess):
def __init__(self, inference_state, compiled_subprocess):
super().__init__(inference_state)
self._used = False
self._compiled_subprocess = compiled_subprocess
def __getattr__(self, name):
func = _get_function(name)
def wrapper(*args, **kwargs):
self._used = True
result = self._compiled_subprocess.run(
self._inference_state_weakref(),
func,
args=args,
kwargs=kwargs,
)
# IMO it should be possible to create a hook in pickle.load to
# mess with the loaded objects. However it's extremely complicated
# to work around this so just do it with this call. ~ dave
return self._convert_access_handles(result)
return wrapper
def _convert_access_handles(self, obj):
if isinstance(obj, SignatureParam):
return SignatureParam(*self._convert_access_handles(tuple(obj)))
elif isinstance(obj, tuple):
return tuple(self._convert_access_handles(o) for o in obj)
elif isinstance(obj, list):
return [self._convert_access_handles(o) for o in obj]
elif isinstance(obj, AccessHandle):
try:
# Rewrite the access handle to one we're already having.
obj = self.get_access_handle(obj.id)
except KeyError:
obj.add_subprocess(self)
self.set_access_handle(obj)
elif isinstance(obj, AccessPath):
return AccessPath(self._convert_access_handles(obj.accesses))
return obj
def __del__(self):
if self._used and not self._compiled_subprocess.is_crashed:
self._compiled_subprocess.delete_inference_state(self._inference_state_id)
class CompiledSubprocess:
is_crashed = False
def __init__(self, executable, env_vars=None):
self._executable = executable
self._env_vars = env_vars
self._inference_state_deletion_queue = collections.deque()
self._cleanup_callable = lambda: None
def __repr__(self):
pid = os.getpid()
return '<%s _executable=%r, is_crashed=%r, pid=%r>' % (
self.__class__.__name__,
self._executable,
self.is_crashed,
pid,
)
@memoize_method
def _get_process(self):
debug.dbg('Start environment subprocess %s', self._executable)
parso_path = sys.modules['parso'].__file__
args = (
self._executable,
_MAIN_PATH,
os.path.dirname(os.path.dirname(parso_path)),
'.'.join(str(x) for x in sys.version_info[:3]),
)
process = _GeneralizedPopen(
args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self._env_vars
)
self._stderr_queue = queue.Queue()
self._stderr_thread = t = Thread(
target=_enqueue_output,
args=(process.stderr, self._stderr_queue)
)
t.daemon = True
t.start()
# Ensure the subprocess is properly cleaned up when the object
# is garbage collected.
self._cleanup_callable = weakref.finalize(self,
_cleanup_process,
process,
t)
return process
def run(self, inference_state, function, args=(), kwargs={}):
# Delete old inference_states.
while True:
try:
inference_state_id = self._inference_state_deletion_queue.pop()
except IndexError:
break
else:
self._send(inference_state_id, None)
assert callable(function)
return self._send(id(inference_state), function, args, kwargs)
def get_sys_path(self):
return self._send(None, functions.get_sys_path, (), {})
def _kill(self):
self.is_crashed = True
self._cleanup_callable()
def _send(self, inference_state_id, function, args=(), kwargs={}):
if self.is_crashed:
raise InternalError("The subprocess %s has crashed." % self._executable)
data = inference_state_id, function, args, kwargs
try:
pickle_dump(data, self._get_process().stdin, PICKLE_PROTOCOL)
except BrokenPipeError:
self._kill()
raise InternalError("The subprocess %s was killed. Maybe out of memory?"
% self._executable)
try:
is_exception, traceback, result = pickle_load(self._get_process().stdout)
except EOFError as eof_error:
try:
stderr = self._get_process().stderr.read().decode('utf-8', 'replace')
except Exception as exc:
stderr = '<empty/not available (%r)>' % exc
self._kill()
_add_stderr_to_debug(self._stderr_queue)
raise InternalError(
"The subprocess %s has crashed (%r, stderr=%s)." % (
self._executable,
eof_error,
stderr,
))
_add_stderr_to_debug(self._stderr_queue)
if is_exception:
# Replace the attribute error message with the traceback. It's
# way more informative.
result.args = (traceback,)
raise result
return result
def delete_inference_state(self, inference_state_id):
"""
Currently we are not deleting inference_states instantly. They only get
deleted once the subprocess is used again. It would probably be a better
solution to move all of this into a thread. However, the memory usage
of a single inference_state shouldn't be that high.
"""
# With an argument - the inference_state gets deleted.
self._inference_state_deletion_queue.append(inference_state_id)
class Listener:
def __init__(self):
self._inference_states = {}
# TODO refactor so we don't need to process anymore, just handle
# controlling.
self._process = _InferenceStateProcess(Listener)
def _get_inference_state(self, function, inference_state_id):
from jedi.inference import InferenceState
try:
inference_state = self._inference_states[inference_state_id]
except KeyError:
from jedi import InterpreterEnvironment
inference_state = InferenceState(
# The project is not actually needed. Nothing should need to
# access it.
project=None,
environment=InterpreterEnvironment()
)
self._inference_states[inference_state_id] = inference_state
return inference_state
def _run(self, inference_state_id, function, args, kwargs):
if inference_state_id is None:
return function(*args, **kwargs)
elif function is None:
del self._inference_states[inference_state_id]
else:
inference_state = self._get_inference_state(function, inference_state_id)
# Exchange all handles
args = list(args)
for i, arg in enumerate(args):
if isinstance(arg, AccessHandle):
args[i] = inference_state.compiled_subprocess.get_access_handle(arg.id)
for key, value in kwargs.items():
if isinstance(value, AccessHandle):
kwargs[key] = inference_state.compiled_subprocess.get_access_handle(value.id)
return function(inference_state, *args, **kwargs)
def listen(self):
stdout = sys.stdout
# Mute stdout. Nobody should actually be able to write to it,
# because stdout is used for IPC.
sys.stdout = open(os.devnull, 'w')
stdin = sys.stdin
stdout = stdout.buffer
stdin = stdin.buffer
while True:
try:
payload = pickle_load(stdin)
except EOFError:
# It looks like the parent process closed.
# Don't make a big fuss here and just exit.
exit(0)
try:
result = False, None, self._run(*payload)
except Exception as e:
result = True, traceback.format_exc(), e
pickle_dump(result, stdout, PICKLE_PROTOCOL)
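# Illustrative sketch (not part of jedi): conceptually, the parent/child
# protocol is just pickled tuples flowing over stdin/stdout (which is why
# stdout is muted above - a stray print() would corrupt the stream). The
# `_sketch_*` names are hypothetical; jedi's own pickle_dump/pickle_load
# helpers add flushing on top of plain pickle.
import io as _sketch_io
import pickle as _sketch_pickle

_sketch_request = (1234, 'some_function_name', ('arg',), {})
_sketch_pipe = _sketch_io.BytesIO()
_sketch_pickle.dump(_sketch_request, _sketch_pipe, protocol=PICKLE_PROTOCOL)
_sketch_pipe.seek(0)
assert _sketch_pickle.load(_sketch_pipe) == _sketch_request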
class AccessHandle:
def __init__(self, subprocess, access, id_):
self.access = access
self._subprocess = subprocess
self.id = id_
def add_subprocess(self, subprocess):
self._subprocess = subprocess
def __repr__(self):
try:
detail = self.access
except AttributeError:
detail = '#' + str(self.id)
return '<%s of %s>' % (self.__class__.__name__, detail)
def __getstate__(self):
return self.id
def __setstate__(self, state):
self.id = state
def __getattr__(self, name):
if name in ('id', 'access') or name.startswith('_'):
raise AttributeError("Something went wrong with unpickling")
# print('getattr', name, file=sys.stderr)
return partial(self._workaround, name)
def _workaround(self, name, *args, **kwargs):
"""
TODO Currently we're passing slice objects around. This should not
happen. They are also the only unhashable objects that we're passing
around.
"""
if args and isinstance(args[0], slice):
return self._subprocess.get_compiled_method_return(self.id, name, *args, **kwargs)
return self._cached_results(name, *args, **kwargs)
@memoize_method
def _cached_results(self, name, *args, **kwargs):
return self._subprocess.get_compiled_method_return(self.id, name, *args, **kwargs)

View File

@ -0,0 +1,40 @@
import os
import sys
from importlib.abc import MetaPathFinder
from importlib.machinery import PathFinder
# Remove the first entry, because it's simply a directory entry that equals
# this directory.
del sys.path[0]
def _get_paths():
# Get the path to jedi.
_d = os.path.dirname
_jedi_path = _d(_d(_d(_d(_d(__file__)))))
_parso_path = sys.argv[1]
# The paths are the directory that jedi and parso lie in.
return {'jedi': _jedi_path, 'parso': _parso_path}
class _ExactImporter(MetaPathFinder):
def __init__(self, path_dct):
self._path_dct = path_dct
def find_module(self, fullname, path=None):
if path is None and fullname in self._path_dct:
p = self._path_dct[fullname]
loader = PathFinder.find_module(fullname, path=[p])
return loader
return None
# Try to import jedi/parso.
sys.meta_path.insert(0, _ExactImporter(_get_paths()))
from jedi.inference.compiled import subprocess # noqa: E402
sys.meta_path.pop(0)
# Retrieve the pickle protocol.
host_sys_version = [int(x) for x in sys.argv[2].split('.')]
# And finally start the client.
subprocess.Listener().listen()

View File

@ -0,0 +1,255 @@
import sys
import os
import inspect
import importlib
import warnings
from pathlib import Path
from zipfile import ZipFile
from zipimport import zipimporter, ZipImportError
from importlib.machinery import all_suffixes
from jedi.inference.compiled import access
from jedi import debug
from jedi import parser_utils
from jedi.file_io import KnownContentFileIO, ZipFileIO
def get_sys_path():
return sys.path
def load_module(inference_state, **kwargs):
return access.load_module(inference_state, **kwargs)
def get_compiled_method_return(inference_state, id, attribute, *args, **kwargs):
handle = inference_state.compiled_subprocess.get_access_handle(id)
return getattr(handle.access, attribute)(*args, **kwargs)
def create_simple_object(inference_state, obj):
return access.create_access_path(inference_state, obj)
def get_module_info(inference_state, sys_path=None, full_name=None, **kwargs):
"""
Returns Tuple[Union[NamespaceInfo, FileIO, None], Optional[bool]]
"""
if sys_path is not None:
sys.path, temp = sys_path, sys.path
try:
return _find_module(full_name=full_name, **kwargs)
except ImportError:
return None, None
finally:
if sys_path is not None:
sys.path = temp
def get_builtin_module_names(inference_state):
return sys.builtin_module_names
def _test_raise_error(inference_state, exception_type):
"""
Raise an error to simulate certain problems for unit tests.
"""
raise exception_type
def _test_print(inference_state, stderr=None, stdout=None):
"""
Force some prints in the subprocesses. This exists for unit tests.
"""
if stderr is not None:
print(stderr, file=sys.stderr)
sys.stderr.flush()
if stdout is not None:
print(stdout)
sys.stdout.flush()
def _get_init_path(directory_path):
"""
The __init__ file can be searched in a directory. If found return it, else
None.
"""
for suffix in all_suffixes():
path = os.path.join(directory_path, '__init__' + suffix)
if os.path.exists(path):
return path
return None
def safe_literal_eval(inference_state, value):
return parser_utils.safe_literal_eval(value)
def iter_module_names(*args, **kwargs):
return list(_iter_module_names(*args, **kwargs))
def _iter_module_names(inference_state, paths):
# Python modules/packages
for path in paths:
try:
dir_entries = ((entry.name, entry.is_dir()) for entry in os.scandir(path))
except OSError:
try:
zip_import_info = zipimporter(path)
# Unfortunately, there is no public way to access zipimporter's
# private _files member. We therefore have to use a
# custom function to iterate over the files.
dir_entries = _zip_list_subdirectory(
zip_import_info.archive, zip_import_info.prefix)
except ZipImportError:
# The file might not exist or reading it might lead to an error.
debug.warning("Not possible to list directory: %s", path)
continue
for name, is_dir in dir_entries:
# First Namespaces then modules/stubs
if is_dir:
# pycache is obviously not an interesting namespace. Also the
# name must be a valid identifier.
if name != '__pycache__' and name.isidentifier():
yield name
else:
if name.endswith('.pyi'): # Stub files
modname = name[:-4]
else:
modname = inspect.getmodulename(name)
if modname and '.' not in modname:
if modname != '__init__':
yield modname
def _find_module(string, path=None, full_name=None, is_global_search=True):
"""
Provides information about a module.
This function isolates the differences in importing libraries introduced with
Python 3.3 onwards; it gets a module name and optionally a path. It will return a
tuple containing an open file for the module (if not builtin), the filename
or the name of the module if it is a builtin one, and a boolean indicating
if the module is contained in a package.
"""
spec = None
loader = None
for finder in sys.meta_path:
if is_global_search and finder != importlib.machinery.PathFinder:
p = None
else:
p = path
try:
find_spec = finder.find_spec
except AttributeError:
# These are old-school classes that still have a different API, just
# ignore those.
continue
spec = find_spec(string, p)
if spec is not None:
loader = spec.loader
if loader is None and not spec.has_location:
# This is a namespace package.
full_name = string if not path else full_name
implicit_ns_info = ImplicitNSInfo(full_name, spec.submodule_search_locations._path)
return implicit_ns_info, True
break
return _find_module_py33(string, path, loader)
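# Illustrative usage (not part of jedi, kept commented out because results
# depend on the running interpreter): for a plain stdlib package the helper
# returns a file IO wrapper plus a flag saying whether it is a package.
#
#   file_io, is_package = _find_module('json')
#   # -> (KnownContentFileIO(...json/__init__.py...), True)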
def _find_module_py33(string, path=None, loader=None, full_name=None, is_global_search=True):
loader = loader or importlib.machinery.PathFinder.find_module(string, path)
if loader is None and path is None: # Fallback to find builtins
try:
with warnings.catch_warnings(record=True):
# Mute "DeprecationWarning: Use importlib.util.find_spec()
# instead." While we should replace that in the future, it's
# probably good to wait until we deprecate Python 3.3, since
# it was added in Python 3.4 and find_loader hasn't been
# removed in 3.6.
loader = importlib.find_loader(string)
except ValueError as e:
# See #491. Importlib might raise a ValueError, to avoid this, we
# just raise an ImportError to fix the issue.
raise ImportError("Originally " + repr(e))
if loader is None:
raise ImportError("Couldn't find a loader for {}".format(string))
return _from_loader(loader, string)
def _from_loader(loader, string):
try:
is_package_method = loader.is_package
except AttributeError:
is_package = False
else:
is_package = is_package_method(string)
try:
get_filename = loader.get_filename
except AttributeError:
return None, is_package
else:
module_path = get_filename(string)
# To avoid unicode and read bytes, "overwrite" loader.get_source if
# possible.
try:
f = type(loader).get_source
except AttributeError:
raise ImportError("get_source was not defined on loader")
if f is not importlib.machinery.SourceFileLoader.get_source:
# Unfortunately we are reading unicode here, not bytes.
# It seems hard to get bytes, because the zip importer
# logic just unpacks the zip file and returns a file descriptor
# that we cannot as easily access. Therefore we just read it as
# a string in the cases where get_source was overwritten.
code = loader.get_source(string)
else:
code = _get_source(loader, string)
if code is None:
return None, is_package
if isinstance(loader, zipimporter):
return ZipFileIO(module_path, code, Path(loader.archive)), is_package
return KnownContentFileIO(module_path, code), is_package
def _get_source(loader, fullname):
"""
This method is here as a replacement for SourceLoader.get_source. That
method returns unicode, but we prefer bytes.
"""
path = loader.get_filename(fullname)
try:
return loader.get_data(path)
except OSError:
raise ImportError('source not available through get_data()',
name=fullname)
def _zip_list_subdirectory(zip_path, zip_subdir_path):
zip_file = ZipFile(zip_path)
zip_subdir_path = Path(zip_subdir_path)
zip_content_file_paths = zip_file.namelist()
for raw_file_name in zip_content_file_paths:
file_path = Path(raw_file_name)
if file_path.parent == zip_subdir_path:
file_path = file_path.relative_to(zip_subdir_path)
yield file_path.name, raw_file_name.endswith("/")
class ImplicitNSInfo:
"""Stores information returned from an implicit namespace spec"""
def __init__(self, name, paths):
self.name = name
self.paths = paths

View File

@ -0,0 +1,612 @@
"""
Imitate the parser representation.
"""
import re
from functools import partial
from inspect import Parameter
from pathlib import Path
from typing import Optional
from jedi import debug
from jedi.inference.utils import to_list
from jedi.cache import memoize_method
from jedi.inference.filters import AbstractFilter
from jedi.inference.names import AbstractNameDefinition, ValueNameMixin, \
ParamNameInterface
from jedi.inference.base_value import Value, ValueSet, NO_VALUES
from jedi.inference.lazy_value import LazyKnownValue
from jedi.inference.compiled.access import _sentinel
from jedi.inference.cache import inference_state_function_cache
from jedi.inference.helpers import reraise_getitem_errors
from jedi.inference.signature import BuiltinSignature
from jedi.inference.context import CompiledContext, CompiledModuleContext
class CheckAttribute:
"""Raises :exc:`AttributeError` if the attribute X is not available."""
def __init__(self, check_name=None):
# Remove the py in front of e.g. py__call__.
self.check_name = check_name
def __call__(self, func):
self.func = func
if self.check_name is None:
self.check_name = func.__name__[2:]
return self
def __get__(self, instance, owner):
if instance is None:
return self
# This might raise an AttributeError. That's wanted.
instance.access_handle.getattr_paths(self.check_name)
return partial(self.func, instance)
class CompiledValue(Value):
def __init__(self, inference_state, access_handle, parent_context=None):
super().__init__(inference_state, parent_context)
self.access_handle = access_handle
def py__call__(self, arguments):
return_annotation = self.access_handle.get_return_annotation()
if return_annotation is not None:
# TODO the return annotation may also be a string.
return create_from_access_path(
self.inference_state,
return_annotation
).execute_annotation()
try:
self.access_handle.getattr_paths('__call__')
except AttributeError:
return super().py__call__(arguments)
else:
if self.access_handle.is_class():
from jedi.inference.value import CompiledInstance
return ValueSet([
CompiledInstance(self.inference_state, self.parent_context, self, arguments)
])
else:
return ValueSet(self._execute_function(arguments))
@CheckAttribute()
def py__class__(self):
return create_from_access_path(self.inference_state, self.access_handle.py__class__())
@CheckAttribute()
def py__mro__(self):
return (self,) + tuple(
create_from_access_path(self.inference_state, access)
for access in self.access_handle.py__mro__accesses()
)
@CheckAttribute()
def py__bases__(self):
return tuple(
create_from_access_path(self.inference_state, access)
for access in self.access_handle.py__bases__()
)
def get_qualified_names(self):
return self.access_handle.get_qualified_names()
def py__bool__(self):
return self.access_handle.py__bool__()
def is_class(self):
return self.access_handle.is_class()
def is_function(self):
return self.access_handle.is_function()
def is_module(self):
return self.access_handle.is_module()
def is_compiled(self):
return True
def is_stub(self):
return False
def is_instance(self):
return self.access_handle.is_instance()
def py__doc__(self):
return self.access_handle.py__doc__()
@to_list
def get_param_names(self):
try:
signature_params = self.access_handle.get_signature_params()
except ValueError: # Has no signature
params_str, ret = self._parse_function_doc()
if not params_str:
tokens = []
else:
tokens = params_str.split(',')
if self.access_handle.ismethoddescriptor():
tokens.insert(0, 'self')
for p in tokens:
name, _, default = p.strip().partition('=')
yield UnresolvableParamName(self, name, default)
else:
for signature_param in signature_params:
yield SignatureParamName(self, signature_param)
def get_signatures(self):
_, return_string = self._parse_function_doc()
return [BuiltinSignature(self, return_string)]
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.access_handle.get_repr())
@memoize_method
def _parse_function_doc(self):
doc = self.py__doc__()
if doc is None:
return '', ''
return _parse_function_doc(doc)
@property
def api_type(self):
return self.access_handle.get_api_type()
def get_filters(self, is_instance=False, origin_scope=None):
yield self._ensure_one_filter(is_instance)
@memoize_method
def _ensure_one_filter(self, is_instance):
return CompiledValueFilter(self.inference_state, self, is_instance)
def py__simple_getitem__(self, index):
with reraise_getitem_errors(IndexError, KeyError, TypeError):
try:
access = self.access_handle.py__simple_getitem__(index)
except AttributeError:
return super().py__simple_getitem__(index)
if access is None:
return super().py__simple_getitem__(index)
return ValueSet([create_from_access_path(self.inference_state, access)])
def py__getitem__(self, index_value_set, contextualized_node):
all_access_paths = self.access_handle.py__getitem__all_values()
if all_access_paths is None:
# This means basically that no __getitem__ has been defined on this
# object.
return super().py__getitem__(index_value_set, contextualized_node)
return ValueSet(
create_from_access_path(self.inference_state, access)
for access in all_access_paths
)
def py__iter__(self, contextualized_node=None):
if not self.access_handle.has_iter():
yield from super().py__iter__(contextualized_node)
access_path_list = self.access_handle.py__iter__list()
if access_path_list is None:
# There is no __iter__ method on this object.
return
for access in access_path_list:
yield LazyKnownValue(create_from_access_path(self.inference_state, access))
def py__name__(self):
return self.access_handle.py__name__()
@property
def name(self):
name = self.py__name__()
if name is None:
name = self.access_handle.get_repr()
return CompiledValueName(self, name)
def _execute_function(self, params):
from jedi.inference import docstrings
from jedi.inference.compiled import builtin_from_name
if self.api_type != 'function':
return
for name in self._parse_function_doc()[1].split():
try:
# TODO wtf is this? this is exactly the same as the thing
# below. It uses getattr as well.
self.inference_state.builtins_module.access_handle.getattr_paths(name)
except AttributeError:
continue
else:
bltn_obj = builtin_from_name(self.inference_state, name)
yield from self.inference_state.execute(bltn_obj, params)
yield from docstrings.infer_return_types(self)
def get_safe_value(self, default=_sentinel):
try:
return self.access_handle.get_safe_value()
except ValueError:
if default == _sentinel:
raise
return default
def execute_operation(self, other, operator):
try:
return ValueSet([create_from_access_path(
self.inference_state,
self.access_handle.execute_operation(other.access_handle, operator)
)])
except TypeError:
return NO_VALUES
def execute_annotation(self):
if self.access_handle.get_repr() == 'None':
# None as an annotation doesn't need to be executed.
return ValueSet([self])
name, args = self.access_handle.get_annotation_name_and_args()
arguments = [
ValueSet([create_from_access_path(self.inference_state, path)])
for path in args
]
if name == 'Union':
return ValueSet.from_sets(arg.execute_annotation() for arg in arguments)
elif name:
# While with_generics only exists on very specific objects, we
# should probably be fine, because we control all the typing
# objects.
return ValueSet([
v.with_generics(arguments)
for v in self.inference_state.typing_module.py__getattribute__(name)
]).execute_annotation()
return super().execute_annotation()
def negate(self):
return create_from_access_path(self.inference_state, self.access_handle.negate())
def get_metaclasses(self):
return NO_VALUES
def _as_context(self):
return CompiledContext(self)
@property
def array_type(self):
return self.access_handle.get_array_type()
def get_key_values(self):
return [
create_from_access_path(self.inference_state, k)
for k in self.access_handle.get_key_paths()
]
def get_type_hint(self, add_class_info=True):
if self.access_handle.get_repr() in ('None', "<class 'NoneType'>"):
return 'None'
return None
class CompiledModule(CompiledValue):
file_io = None # For modules
def _as_context(self):
return CompiledModuleContext(self)
def py__path__(self):
return self.access_handle.py__path__()
def is_package(self):
return self.py__path__() is not None
@property
def string_names(self):
# For modules
name = self.py__name__()
if name is None:
return ()
return tuple(name.split('.'))
def py__file__(self) -> Optional[Path]:
return self.access_handle.py__file__() # type: ignore[no-any-return]
class CompiledName(AbstractNameDefinition):
def __init__(self, inference_state, parent_value, name):
self._inference_state = inference_state
self.parent_context = parent_value.as_context()
self._parent_value = parent_value
self.string_name = name
def py__doc__(self):
return self.infer_compiled_value().py__doc__()
def _get_qualified_names(self):
parent_qualified_names = self.parent_context.get_qualified_names()
if parent_qualified_names is None:
return None
return parent_qualified_names + (self.string_name,)
def get_defining_qualified_value(self):
context = self.parent_context
if context.is_module() or context.is_class():
return self.parent_context.get_value() # Might be None
return None
def __repr__(self):
try:
name = self.parent_context.name # __name__ is not defined all the time
except AttributeError:
name = None
return '<%s: (%s).%s>' % (self.__class__.__name__, name, self.string_name)
@property
def api_type(self):
return self.infer_compiled_value().api_type
def infer(self):
return ValueSet([self.infer_compiled_value()])
@memoize_method
def infer_compiled_value(self):
return create_from_name(self._inference_state, self._parent_value, self.string_name)
class SignatureParamName(ParamNameInterface, AbstractNameDefinition):
def __init__(self, compiled_value, signature_param):
self.parent_context = compiled_value.parent_context
self._signature_param = signature_param
@property
def string_name(self):
return self._signature_param.name
def to_string(self):
s = self._kind_string() + self.string_name
if self._signature_param.has_annotation:
s += ': ' + self._signature_param.annotation_string
if self._signature_param.has_default:
s += '=' + self._signature_param.default_string
return s
def get_kind(self):
return getattr(Parameter, self._signature_param.kind_name)
def infer(self):
p = self._signature_param
inference_state = self.parent_context.inference_state
values = NO_VALUES
if p.has_default:
values = ValueSet([create_from_access_path(inference_state, p.default)])
if p.has_annotation:
annotation = create_from_access_path(inference_state, p.annotation)
values |= annotation.execute_with_values()
return values
class UnresolvableParamName(ParamNameInterface, AbstractNameDefinition):
def __init__(self, compiled_value, name, default):
self.parent_context = compiled_value.parent_context
self.string_name = name
self._default = default
def get_kind(self):
return Parameter.POSITIONAL_ONLY
def to_string(self):
string = self.string_name
if self._default:
string += '=' + self._default
return string
def infer(self):
return NO_VALUES
class CompiledValueName(ValueNameMixin, AbstractNameDefinition):
def __init__(self, value, name):
self.string_name = name
self._value = value
self.parent_context = value.parent_context
class EmptyCompiledName(AbstractNameDefinition):
"""
Accessing some names will raise an exception. To keep completions available
anyway, give Jedi the option to return this object instead; it infers to
nothing.
"""
def __init__(self, inference_state, name):
self.parent_context = inference_state.builtins_module
self.string_name = name
def infer(self):
return NO_VALUES
class CompiledValueFilter(AbstractFilter):
def __init__(self, inference_state, compiled_value, is_instance=False):
self._inference_state = inference_state
self.compiled_value = compiled_value
self.is_instance = is_instance
def get(self, name):
access_handle = self.compiled_value.access_handle
return self._get(
name,
lambda name, safe: access_handle.is_allowed_getattr(name, safe=safe),
lambda name: name in access_handle.dir(),
check_has_attribute=True
)
def _get(self, name, allowed_getattr_callback, in_dir_callback, check_has_attribute=False):
"""
To remove quite a few access calls we introduced the callback here.
"""
if self._inference_state.allow_descriptor_getattr:
pass
has_attribute, is_descriptor = allowed_getattr_callback(
name,
safe=not self._inference_state.allow_descriptor_getattr
)
if check_has_attribute and not has_attribute:
return []
if (is_descriptor or not has_attribute) \
and not self._inference_state.allow_descriptor_getattr:
return [self._get_cached_name(name, is_empty=True)]
if self.is_instance and not in_dir_callback(name):
return []
return [self._get_cached_name(name)]
@memoize_method
def _get_cached_name(self, name, is_empty=False):
if is_empty:
return EmptyCompiledName(self._inference_state, name)
else:
return self._create_name(name)
def values(self):
from jedi.inference.compiled import builtin_from_name
names = []
needs_type_completions, dir_infos = self.compiled_value.access_handle.get_dir_infos()
# We could use `safe=False` here as well, especially as a parameter to
# get_dir_infos. But this would lead to a lot of property executions
# that are probably not wanted. The drawback for this is that we
# have a different name for `get` and `values`. For `get` we always
# execute.
for name in dir_infos:
names += self._get(
name,
lambda name, safe: dir_infos[name],
lambda name: name in dir_infos,
)
# ``dir`` doesn't include the type names.
if not self.is_instance and needs_type_completions:
for filter in builtin_from_name(self._inference_state, 'type').get_filters():
names += filter.values()
return names
def _create_name(self, name):
return CompiledName(
self._inference_state,
self.compiled_value,
name
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.compiled_value)
docstr_defaults = {
'floating point number': 'float',
'character': 'str',
'integer': 'int',
'dictionary': 'dict',
'string': 'str',
}
def _parse_function_doc(doc):
"""
Takes a function and returns the params and return value as a tuple.
This is nothing more than a docstring parser.
TODO docstrings like utime(path, (atime, mtime)) and a(b [, b]) -> None
TODO docstrings like 'tuple of integers'
"""
# parse round parentheses: def func(a, (b,c))
try:
count = 0
start = doc.index('(')
for i, s in enumerate(doc[start:]):
if s == '(':
count += 1
elif s == ')':
count -= 1
if count == 0:
end = start + i
break
param_str = doc[start + 1:end]
except (ValueError, UnboundLocalError):
# ValueError for doc.index
# UnboundLocalError for undefined end in last line
debug.dbg('no brackets found - no param')
end = 0
param_str = ''
else:
# remove square brackets, that show an optional param ( = None)
def change_options(m):
args = m.group(1).split(',')
for i, a in enumerate(args):
if a and '=' not in a:
args[i] += '=None'
return ','.join(args)
while True:
param_str, changes = re.subn(r' ?\[([^\[\]]+)\]',
change_options, param_str)
if changes == 0:
break
param_str = param_str.replace('-', '_') # see: isinstance.__doc__
# parse return value
r = re.search('-[>-]* ', doc[end:end + 7])
if r is None:
ret = ''
else:
index = end + r.end()
# get result type, which can contain newlines
pattern = re.compile(r'(,\n|[^\n-])+')
ret_str = pattern.match(doc, index).group(0).strip()
# New object -> object()
ret_str = re.sub(r'[nN]ew (.*)', r'\1()', ret_str)
ret = docstr_defaults.get(ret_str, ret_str)
return param_str, ret
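# Illustrative note (not part of the upstream module): given the docstring
#     'func(a, b [, c]) -> str'
# _parse_function_doc is expected to return ('a, b, c=None', 'str'): the
# optional bracketed parameter receives a "=None" default and the text after
# "->" is taken as the return type (normalized via docstr_defaults where
# applicable).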
def create_from_name(inference_state, compiled_value, name):
access_paths = compiled_value.access_handle.getattr_paths(name, default=None)
value = None
for access_path in access_paths:
value = create_cached_compiled_value(
inference_state,
access_path,
parent_context=None if value is None else value.as_context(),
)
return value
def _normalize_create_args(func):
"""The cache doesn't care about keyword vs. normal args."""
def wrapper(inference_state, obj, parent_context=None):
return func(inference_state, obj, parent_context)
return wrapper
def create_from_access_path(inference_state, access_path):
value = None
for name, access in access_path.accesses:
value = create_cached_compiled_value(
inference_state,
access,
parent_context=None if value is None else value.as_context()
)
return value
@_normalize_create_args
@inference_state_function_cache()
def create_cached_compiled_value(inference_state, access_handle, parent_context):
assert not isinstance(parent_context, CompiledValue)
if parent_context is None:
cls = CompiledModule
else:
cls = CompiledValue
return cls(inference_state, access_handle, parent_context)

View File

@ -0,0 +1,499 @@
from abc import abstractmethod
from contextlib import contextmanager
from pathlib import Path
from typing import Optional
from parso.tree import search_ancestor
from parso.python.tree import Name
from jedi.inference.filters import ParserTreeFilter, MergedFilter, \
GlobalNameFilter
from jedi.inference.names import AnonymousParamName, TreeNameDefinition
from jedi.inference.base_value import NO_VALUES, ValueSet
from jedi.parser_utils import get_parent_scope
from jedi import debug
from jedi import parser_utils
class AbstractContext:
# Must be defined: inference_state and tree_node and parent_context as an attribute/property
def __init__(self, inference_state):
self.inference_state = inference_state
self.predefined_names = {}
@abstractmethod
def get_filters(self, until_position=None, origin_scope=None):
raise NotImplementedError
def goto(self, name_or_str, position):
from jedi.inference import finder
filters = _get_global_filters_for_name(
self, name_or_str if isinstance(name_or_str, Name) else None, position,
)
names = finder.filter_name(filters, name_or_str)
debug.dbg('context.goto %s in (%s): %s', name_or_str, self, names)
return names
def py__getattribute__(self, name_or_str, name_context=None, position=None,
analysis_errors=True):
"""
:param position: Position of the last statement -> tuple of line, column
"""
if name_context is None:
name_context = self
names = self.goto(name_or_str, position)
string_name = name_or_str.value if isinstance(name_or_str, Name) else name_or_str
# This paragraph is currently needed for proper branch type inference
# (static analysis).
found_predefined_types = None
if self.predefined_names and isinstance(name_or_str, Name):
node = name_or_str
while node is not None and not parser_utils.is_scope(node):
node = node.parent
if node.type in ("if_stmt", "for_stmt", "comp_for", 'sync_comp_for'):
try:
name_dict = self.predefined_names[node]
types = name_dict[string_name]
except KeyError:
continue
else:
found_predefined_types = types
break
if found_predefined_types is not None and names:
from jedi.inference import flow_analysis
check = flow_analysis.reachability_check(
context=self,
value_scope=self.tree_node,
node=name_or_str,
)
if check is flow_analysis.UNREACHABLE:
values = NO_VALUES
else:
values = found_predefined_types
else:
values = ValueSet.from_sets(name.infer() for name in names)
if not names and not values and analysis_errors:
if isinstance(name_or_str, Name):
from jedi.inference import analysis
message = ("NameError: name '%s' is not defined." % string_name)
analysis.add(name_context, 'name-error', name_or_str, message)
debug.dbg('context.names_to_types: %s -> %s', names, values)
if values:
return values
return self._check_for_additional_knowledge(name_or_str, name_context, position)
def _check_for_additional_knowledge(self, name_or_str, name_context, position):
name_context = name_context or self
# Add isinstance and other if/assert knowledge.
if isinstance(name_or_str, Name) and not name_context.is_instance():
flow_scope = name_or_str
base_nodes = [name_context.tree_node]
if any(b.type in ('comp_for', 'sync_comp_for') for b in base_nodes):
return NO_VALUES
from jedi.inference.finder import check_flow_information
while True:
flow_scope = get_parent_scope(flow_scope, include_flows=True)
n = check_flow_information(name_context, flow_scope,
name_or_str, position)
if n is not None:
return n
if flow_scope in base_nodes:
break
return NO_VALUES
def get_root_context(self):
parent_context = self.parent_context
if parent_context is None:
return self
return parent_context.get_root_context()
def is_module(self):
return False
def is_builtins_module(self):
return False
def is_class(self):
return False
def is_stub(self):
return False
def is_instance(self):
return False
def is_compiled(self):
return False
def is_bound_method(self):
return False
@abstractmethod
def py__name__(self):
raise NotImplementedError
def get_value(self):
raise NotImplementedError
@property
def name(self):
return None
def get_qualified_names(self):
return ()
def py__doc__(self):
return ''
@contextmanager
def predefine_names(self, flow_scope, dct):
predefined = self.predefined_names
predefined[flow_scope] = dct
try:
yield
finally:
del predefined[flow_scope]
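# Illustrative sketch (hypothetical names, not part of the upstream module):
# flow-analysis code narrows a name inside a branch roughly like
#     with context.predefine_names(if_stmt_node, {'x': narrowed_value_set}):
#         ...  # inference inside the branch now sees the narrowed types
# py__getattribute__ above consults predefined_names before falling back to
# the regular name filters.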
class ValueContext(AbstractContext):
"""
Should be defined, otherwise the API returns empty types.
"""
def __init__(self, value):
super().__init__(value.inference_state)
self._value = value
@property
def tree_node(self):
return self._value.tree_node
@property
def parent_context(self):
return self._value.parent_context
def is_module(self):
return self._value.is_module()
def is_builtins_module(self):
return self._value == self.inference_state.builtins_module
def is_class(self):
return self._value.is_class()
def is_stub(self):
return self._value.is_stub()
def is_instance(self):
return self._value.is_instance()
def is_compiled(self):
return self._value.is_compiled()
def is_bound_method(self):
return self._value.is_bound_method()
def py__name__(self):
return self._value.py__name__()
@property
def name(self):
return self._value.name
def get_qualified_names(self):
return self._value.get_qualified_names()
def py__doc__(self):
return self._value.py__doc__()
def get_value(self):
return self._value
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self._value)
class TreeContextMixin:
def infer_node(self, node):
from jedi.inference.syntax_tree import infer_node
return infer_node(self, node)
def create_value(self, node):
from jedi.inference import value
if node == self.tree_node:
assert self.is_module()
return self.get_value()
parent_context = self.create_context(node)
if node.type in ('funcdef', 'lambdef'):
func = value.FunctionValue.from_context(parent_context, node)
if parent_context.is_class():
class_value = parent_context.parent_context.create_value(parent_context.tree_node)
instance = value.AnonymousInstance(
self.inference_state, parent_context.parent_context, class_value)
func = value.BoundMethod(
instance=instance,
class_context=class_value.as_context(),
function=func
)
return func
elif node.type == 'classdef':
return value.ClassValue(self.inference_state, parent_context, node)
else:
raise NotImplementedError("Probably shouldn't happen: %s" % node)
def create_context(self, node):
def from_scope_node(scope_node, is_nested=True):
if scope_node == self.tree_node:
return self
if scope_node.type in ('funcdef', 'lambdef', 'classdef'):
return self.create_value(scope_node).as_context()
elif scope_node.type in ('comp_for', 'sync_comp_for'):
parent_context = from_scope_node(parent_scope(scope_node.parent))
if node.start_pos >= scope_node.children[-1].start_pos:
return parent_context
return CompForContext(parent_context, scope_node)
raise Exception("There's a scope that was not managed: %s" % scope_node)
def parent_scope(node):
while True:
node = node.parent
if parser_utils.is_scope(node):
return node
elif node.type in ('argument', 'testlist_comp'):
if node.children[1].type in ('comp_for', 'sync_comp_for'):
return node.children[1]
elif node.type == 'dictorsetmaker':
for n in node.children[1:4]:
# In dictionaries it can be pretty much anything.
if n.type in ('comp_for', 'sync_comp_for'):
return n
scope_node = parent_scope(node)
if scope_node.type in ('funcdef', 'classdef'):
colon = scope_node.children[scope_node.children.index(':')]
if node.start_pos < colon.start_pos:
parent = node.parent
if not (parent.type == 'param' and parent.name == node):
scope_node = parent_scope(scope_node)
return from_scope_node(scope_node, is_nested=True)
def create_name(self, tree_name):
definition = tree_name.get_definition()
if definition and definition.type == 'param' and definition.name == tree_name:
funcdef = search_ancestor(definition, 'funcdef', 'lambdef')
func = self.create_value(funcdef)
return AnonymousParamName(func, tree_name)
else:
context = self.create_context(tree_name)
return TreeNameDefinition(context, tree_name)
class FunctionContext(TreeContextMixin, ValueContext):
def get_filters(self, until_position=None, origin_scope=None):
yield ParserTreeFilter(
self.inference_state,
parent_context=self,
until_position=until_position,
origin_scope=origin_scope
)
class ModuleContext(TreeContextMixin, ValueContext):
def py__file__(self) -> Optional[Path]:
return self._value.py__file__() # type: ignore[no-any-return]
def get_filters(self, until_position=None, origin_scope=None):
filters = self._value.get_filters(origin_scope)
# Skip the first filter and replace it.
next(filters, None)
yield MergedFilter(
ParserTreeFilter(
parent_context=self,
until_position=until_position,
origin_scope=origin_scope
),
self.get_global_filter(),
)
yield from filters
def get_global_filter(self):
return GlobalNameFilter(self)
@property
def string_names(self):
return self._value.string_names
@property
def code_lines(self):
return self._value.code_lines
def get_value(self):
"""
This is the only function that converts a context back to a value.
This is necessary for stub -> python conversion and vice versa. However
this method shouldn't be moved to AbstractContext.
"""
return self._value
class NamespaceContext(TreeContextMixin, ValueContext):
def get_filters(self, until_position=None, origin_scope=None):
return self._value.get_filters()
def get_value(self):
return self._value
@property
def string_names(self):
return self._value.string_names
def py__file__(self) -> Optional[Path]:
return self._value.py__file__() # type: ignore[no-any-return]
class ClassContext(TreeContextMixin, ValueContext):
def get_filters(self, until_position=None, origin_scope=None):
yield self.get_global_filter(until_position, origin_scope)
def get_global_filter(self, until_position=None, origin_scope=None):
return ParserTreeFilter(
parent_context=self,
until_position=until_position,
origin_scope=origin_scope
)
class CompForContext(TreeContextMixin, AbstractContext):
def __init__(self, parent_context, comp_for):
super().__init__(parent_context.inference_state)
self.tree_node = comp_for
self.parent_context = parent_context
def get_filters(self, until_position=None, origin_scope=None):
yield ParserTreeFilter(self)
def get_value(self):
return None
def py__name__(self):
return '<comprehension context>'
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self.tree_node)
class CompiledContext(ValueContext):
def get_filters(self, until_position=None, origin_scope=None):
return self._value.get_filters()
class CompiledModuleContext(CompiledContext):
code_lines = None
def get_value(self):
return self._value
@property
def string_names(self):
return self._value.string_names
def py__file__(self) -> Optional[Path]:
return self._value.py__file__() # type: ignore[no-any-return]
def _get_global_filters_for_name(context, name_or_none, position):
# For functions and classes the defaults don't belong to the
# function and get inferred in the value before the function. So
# make sure to exclude the function/class name.
if name_or_none is not None:
ancestor = search_ancestor(name_or_none, 'funcdef', 'classdef', 'lambdef')
lambdef = None
if ancestor == 'lambdef':
# For lambdas it's even more complicated since parts will
# be inferred later.
lambdef = ancestor
ancestor = search_ancestor(name_or_none, 'funcdef', 'classdef')
if ancestor is not None:
colon = ancestor.children[-2]
if position is not None and position < colon.start_pos:
if lambdef is None or position < lambdef.children[-2].start_pos:
position = ancestor.start_pos
return get_global_filters(context, position, name_or_none)
def get_global_filters(context, until_position, origin_scope):
"""
Returns all filters, in order of priority, for global name lookups.
The filters handle name resolution themselves; this function only gathers
the applicable filters, walking outwards from the given context.
>>> from jedi import Script
>>> script = Script('''
... x = ['a', 'b', 'c']
... def func():
... y = None
... ''')
>>> module_node = script._module_node
>>> scope = next(module_node.iter_funcdefs())
>>> scope
<Function: func@3-5>
>>> context = script._get_module_context().create_context(scope)
>>> filters = list(get_global_filters(context, (4, 0), None))
First we get the names from the function scope.
>>> print(filters[0]) # doctest: +ELLIPSIS
MergedFilter(<ParserTreeFilter: ...>, <GlobalNameFilter: ...>)
>>> sorted(str(n) for n in filters[0].values()) # doctest: +NORMALIZE_WHITESPACE
['<TreeNameDefinition: string_name=func start_pos=(3, 4)>',
'<TreeNameDefinition: string_name=x start_pos=(2, 0)>']
>>> filters[0]._filters[0]._until_position
(4, 0)
>>> filters[0]._filters[1]._until_position
Then it yields the names from one level "lower". In this example, this is
the module scope (including globals).
As a side note, you can see that the position in the filter is None on the
globals filter, because there the whole module is searched.
>>> list(filters[1].values()) # package modules -> Also empty.
[]
>>> sorted(name.string_name for name in filters[2].values()) # Module attributes
['__doc__', '__name__', '__package__']
Finally, it yields the builtins filter.
>>> list(filters[3].values()) # doctest: +ELLIPSIS
[...]
"""
base_context = context
from jedi.inference.value.function import BaseFunctionExecutionContext
while context is not None:
# Names in methods cannot be resolved within the class.
yield from context.get_filters(
until_position=until_position,
origin_scope=origin_scope
)
if isinstance(context, (BaseFunctionExecutionContext, ModuleContext)):
# The position should be reset if the current scope is a function.
until_position = None
context = context.parent_context
b = next(base_context.inference_state.builtins_module.get_filters(), None)
assert b is not None
# Add builtins to the global scope.
yield b

View File

@ -0,0 +1,21 @@
from jedi.inference.value import ModuleValue
from jedi.inference.context import ModuleContext
class DocstringModule(ModuleValue):
def __init__(self, in_module_context, **kwargs):
super().__init__(**kwargs)
self._in_module_context = in_module_context
def _as_context(self):
return DocstringModuleContext(self, self._in_module_context)
class DocstringModuleContext(ModuleContext):
def __init__(self, module_value, in_module_context):
super().__init__(module_value)
self._in_module_context = in_module_context
def get_filters(self, origin_scope=None, until_position=None):
yield from super().get_filters(until_position=until_position)
yield from self._in_module_context.get_filters()

View File

@ -0,0 +1,286 @@
"""
Docstrings are another source of information for functions and classes.
:mod:`jedi.inference.dynamic_params` tries to find all executions of functions,
while the docstring parsing is much easier. There are three different types of
docstrings that |jedi| understands:
- `Sphinx <http://sphinx-doc.org/markup/desc.html#info-field-lists>`_
- `Epydoc <http://epydoc.sourceforge.net/manual-fields.html>`_
- `Numpydoc <https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt>`_
For example, the sphinx annotation ``:type foo: str`` clearly states that the
type of ``foo`` is ``str``.
As an addition to parameter searching, this module also provides return
annotations.
"""
import re
import warnings
from parso import parse, ParserSyntaxError
from jedi import debug
from jedi.inference.cache import inference_state_method_cache
from jedi.inference.base_value import iterator_to_value_set, ValueSet, \
NO_VALUES
from jedi.inference.lazy_value import LazyKnownValues
DOCSTRING_PARAM_PATTERNS = [
r'\s*:type\s+%s:\s*([^\n]+)', # Sphinx
r'\s*:param\s+(\w+)\s+%s:[^\n]*', # Sphinx param with type
r'\s*@type\s+%s:\s*([^\n]+)', # Epydoc
]
DOCSTRING_RETURN_PATTERNS = [
re.compile(r'\s*:rtype:\s*([^\n]+)', re.M), # Sphinx
re.compile(r'\s*@rtype:\s*([^\n]+)', re.M), # Epydoc
]
REST_ROLE_PATTERN = re.compile(r':[^`]+:`([^`]+)`')
_numpy_doc_string_cache = None
def _get_numpy_doc_string_cls():
global _numpy_doc_string_cache
if isinstance(_numpy_doc_string_cache, (ImportError, SyntaxError)):
raise _numpy_doc_string_cache
from numpydoc.docscrape import NumpyDocString # type: ignore[import]
_numpy_doc_string_cache = NumpyDocString
return _numpy_doc_string_cache
def _search_param_in_numpydocstr(docstr, param_str):
"""Search `docstr` (in numpydoc format) for type(-s) of `param_str`."""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
# This is a non-public API. If it ever changes we should be
# prepared and return gracefully.
params = _get_numpy_doc_string_cls()(docstr)._parsed_data['Parameters']
except Exception:
return []
for p_name, p_type, p_descr in params:
if p_name == param_str:
m = re.match(r'([^,]+(,[^,]+)*?)(,[ ]*optional)?$', p_type)
if m:
p_type = m.group(1)
return list(_expand_typestr(p_type))
return []
def _search_return_in_numpydocstr(docstr):
"""
Search `docstr` (in numpydoc format) for type(-s) of function returns.
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
doc = _get_numpy_doc_string_cls()(docstr)
except Exception:
return
try:
# This is a non-public API. If it ever changes we should be
# prepared and return gracefully.
returns = doc._parsed_data['Returns']
returns += doc._parsed_data['Yields']
except Exception:
return
for r_name, r_type, r_descr in returns:
# Return names are optional; when the name is omitted, the type ends up in the name field
if not r_type:
r_type = r_name
yield from _expand_typestr(r_type)
def _expand_typestr(type_str):
"""
Attempts to interpret the possible types in `type_str`
"""
# Check if alternative types are specified with 'or'
if re.search(r'\bor\b', type_str):
for t in type_str.split('or'):
yield t.split('of')[0].strip()
# Check for phrases like "list of `type`" and keep only the container type (e.g. list)
elif re.search(r'\bof\b', type_str):
yield type_str.split('of')[0]
# Check if the type is a set of valid literal values, e.g. {'C', 'F', 'A'}
elif type_str.startswith('{'):
node = parse(type_str, version='3.7').children[0]
if node.type == 'atom':
for leaf in getattr(node.children[1], "children", []):
if leaf.type == 'number':
if '.' in leaf.value:
yield 'float'
else:
yield 'int'
elif leaf.type == 'string':
if 'b' in leaf.string_prefix.lower():
yield 'bytes'
else:
yield 'str'
# Ignore everything else.
# Otherwise just work with what we have.
else:
yield type_str
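# Illustrative note (not part of the upstream module): for example,
# list(_expand_typestr('int or str')) is expected to yield ['int', 'str'];
# a literal set such as "{'C', 'F'}" collapses to the types of its literals
# (here 'str'), and a plain 'int' is passed through unchanged.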
def _search_param_in_docstr(docstr, param_str):
"""
Search `docstr` for type(-s) of `param_str`.
>>> _search_param_in_docstr(':type param: int', 'param')
['int']
>>> _search_param_in_docstr('@type param: int', 'param')
['int']
>>> _search_param_in_docstr(
... ':type param: :class:`threading.Thread`', 'param')
['threading.Thread']
>>> bool(_search_param_in_docstr('no document', 'param'))
False
>>> _search_param_in_docstr(':param int param: some description', 'param')
['int']
"""
# look at #40 to see definitions of those params
patterns = [re.compile(p % re.escape(param_str))
for p in DOCSTRING_PARAM_PATTERNS]
for pattern in patterns:
match = pattern.search(docstr)
if match:
return [_strip_rst_role(match.group(1))]
return _search_param_in_numpydocstr(docstr, param_str)
def _strip_rst_role(type_str):
"""
Strip off the part that looks like a ReST role in `type_str`.
>>> _strip_rst_role(':class:`ClassName`') # strip off :class:
'ClassName'
>>> _strip_rst_role(':py:obj:`module.Object`') # works with domain
'module.Object'
>>> _strip_rst_role('ClassName') # do nothing when not ReST role
'ClassName'
See also:
http://sphinx-doc.org/domains.html#cross-referencing-python-objects
"""
match = REST_ROLE_PATTERN.match(type_str)
if match:
return match.group(1)
else:
return type_str
def _infer_for_statement_string(module_context, string):
if string is None:
return []
potential_imports = re.findall(r'((?:\w+\.)*\w+)\.', string)
# Try to import module part in dotted name.
# (e.g., 'threading' in 'threading.Thread').
imports = "\n".join(f"import {p}" for p in potential_imports)
string = f'{imports}\n{string}'
debug.dbg('Parse docstring code %s', string, color='BLUE')
grammar = module_context.inference_state.grammar
try:
module = grammar.parse(string, error_recovery=False)
except ParserSyntaxError:
return []
try:
# It's not the last item, because that's an end marker.
stmt = module.children[-2]
except (AttributeError, IndexError):
return []
if stmt.type not in ('name', 'atom', 'atom_expr'):
return []
# Here we basically use a fake module that also uses the filters in
# the actual module.
from jedi.inference.docstring_utils import DocstringModule
m = DocstringModule(
in_module_context=module_context,
inference_state=module_context.inference_state,
module_node=module,
code_lines=[],
)
return list(_execute_types_in_stmt(m.as_context(), stmt))
def _execute_types_in_stmt(module_context, stmt):
"""
Executing all types or general elements that we find in a statement. This
doesn't include tuple, list and dict literals, because the stuff they
contain is executed. (Used as type information).
"""
definitions = module_context.infer_node(stmt)
return ValueSet.from_sets(
_execute_array_values(module_context.inference_state, d)
for d in definitions
)
def _execute_array_values(inference_state, array):
"""
Tuples indicate that there's not just one return value, but the listed
ones. `(str, int)` means that it returns a tuple with both types.
"""
from jedi.inference.value.iterable import SequenceLiteralValue, FakeTuple, FakeList
if isinstance(array, SequenceLiteralValue) and array.array_type in ('tuple', 'list'):
values = []
for lazy_value in array.py__iter__():
objects = ValueSet.from_sets(
_execute_array_values(inference_state, typ)
for typ in lazy_value.infer()
)
values.append(LazyKnownValues(objects))
cls = FakeTuple if array.array_type == 'tuple' else FakeList
return {cls(inference_state, values)}
else:
return array.execute_annotation()
@inference_state_method_cache()
def infer_param(function_value, param):
def infer_docstring(docstring):
return ValueSet(
p
for param_str in _search_param_in_docstr(docstring, param.name.value)
for p in _infer_for_statement_string(module_context, param_str)
)
module_context = function_value.get_root_context()
func = param.get_parent_function()
if func.type == 'lambdef':
return NO_VALUES
types = infer_docstring(function_value.py__doc__())
if function_value.is_bound_method() \
and function_value.py__name__() == '__init__':
types |= infer_docstring(function_value.class_context.py__doc__())
debug.dbg('Found param types for docstring: %s', types, color='BLUE')
return types
@inference_state_method_cache()
@iterator_to_value_set
def infer_return_types(function_value):
def search_return_in_docstr(code):
for p in DOCSTRING_RETURN_PATTERNS:
match = p.search(code)
if match:
yield _strip_rst_role(match.group(1))
# Check for numpy style return hint
yield from _search_return_in_numpydocstr(code)
for type_str in search_return_in_docstr(function_value.py__doc__()):
yield from _infer_for_statement_string(function_value.get_root_context(), type_str)

View File

@ -0,0 +1,224 @@
"""
One of the really important features of |jedi| is to have an option to
understand code like this::
def foo(bar):
bar. # completion here
foo(1)
There's no doubt whether bar is an ``int`` or not, but if there's also a call
like ``foo('str')``, what would happen? Well, we'll just show both. Because
that's what a human would expect.
It works as follows:
- |Jedi| sees a param
- search for function calls named ``foo``
- execute these calls and check the input.
"""
from jedi import settings
from jedi import debug
from jedi.parser_utils import get_parent_scope
from jedi.inference.cache import inference_state_method_cache
from jedi.inference.arguments import TreeArguments
from jedi.inference.param import get_executed_param_names
from jedi.inference.helpers import is_stdlib_path
from jedi.inference.utils import to_list
from jedi.inference.value import instance
from jedi.inference.base_value import ValueSet, NO_VALUES
from jedi.inference.references import get_module_contexts_containing_name
from jedi.inference import recursion
MAX_PARAM_SEARCHES = 20
def _avoid_recursions(func):
def wrapper(function_value, param_index):
inf = function_value.inference_state
with recursion.execution_allowed(inf, function_value.tree_node) as allowed:
# We need to catch recursions that may occur, because an
# anonymous functions can create an anonymous parameter that is
# more or less self referencing.
if allowed:
inf.dynamic_params_depth += 1
try:
return func(function_value, param_index)
finally:
inf.dynamic_params_depth -= 1
return NO_VALUES
return wrapper
@debug.increase_indent
@_avoid_recursions
def dynamic_param_lookup(function_value, param_index):
"""
A dynamic search for param values. If you try to complete a type:
>>> def func(foo):
... foo
>>> func(1)
>>> func("")
It is not known what type ``foo`` is without analysing the whole code. You
have to look for all calls to ``func`` to find out what ``foo`` possibly
is.
"""
funcdef = function_value.tree_node
if not settings.dynamic_params:
return NO_VALUES
path = function_value.get_root_context().py__file__()
if path is not None and is_stdlib_path(path):
# We don't want to search for references in the stdlib. Usually people
# don't work with it (except if you are a core maintainer, sorry).
# This makes everything slower. Just disable it and run the tests,
# you will see the slowdown, especially in 3.6.
return NO_VALUES
if funcdef.type == 'lambdef':
string_name = _get_lambda_name(funcdef)
if string_name is None:
return NO_VALUES
else:
string_name = funcdef.name.value
debug.dbg('Dynamic param search in %s.', string_name, color='MAGENTA')
module_context = function_value.get_root_context()
arguments_list = _search_function_arguments(module_context, funcdef, string_name)
values = ValueSet.from_sets(
get_executed_param_names(
function_value, arguments
)[param_index].infer()
for arguments in arguments_list
)
debug.dbg('Dynamic param result finished', color='MAGENTA')
return values
@inference_state_method_cache(default=None)
@to_list
def _search_function_arguments(module_context, funcdef, string_name):
"""
Returns a list of param names.
"""
compare_node = funcdef
if string_name == '__init__':
cls = get_parent_scope(funcdef)
if cls.type == 'classdef':
string_name = cls.name.value
compare_node = cls
found_arguments = False
i = 0
inference_state = module_context.inference_state
if settings.dynamic_params_for_other_modules:
module_contexts = get_module_contexts_containing_name(
inference_state, [module_context], string_name,
# Massively limit the number of files that have to be opened.
limit_reduction=5,
)
else:
module_contexts = [module_context]
for for_mod_context in module_contexts:
for name, trailer in _get_potential_nodes(for_mod_context, string_name):
i += 1
# This is a simple way to stop Jedi's dynamic param recursion
# from going wild: the deeper Jedi is in the recursion, the less
# code should be inferred.
if i * inference_state.dynamic_params_depth > MAX_PARAM_SEARCHES:
return
random_context = for_mod_context.create_context(name)
for arguments in _check_name_for_execution(
inference_state, random_context, compare_node, name, trailer):
found_arguments = True
yield arguments
# If there are results after processing a module, we can probably stop
# searching further modules. This is a speed optimization.
if found_arguments:
return
def _get_lambda_name(node):
stmt = node.parent
if stmt.type == 'expr_stmt':
first_operator = next(stmt.yield_operators(), None)
if first_operator == '=':
first = stmt.children[0]
if first.type == 'name':
return first.value
return None
def _get_potential_nodes(module_value, func_string_name):
try:
names = module_value.tree_node.get_used_names()[func_string_name]
except KeyError:
return
for name in names:
bracket = name.get_next_leaf()
trailer = bracket.parent
if trailer.type == 'trailer' and bracket == '(':
yield name, trailer
def _check_name_for_execution(inference_state, context, compare_node, name, trailer):
from jedi.inference.value.function import BaseFunctionExecutionContext
def create_args(value):
arglist = trailer.children[1]
if arglist == ')':
arglist = None
args = TreeArguments(inference_state, context, arglist, trailer)
from jedi.inference.value.instance import InstanceArguments
if value.tree_node.type == 'classdef':
created_instance = instance.TreeInstance(
inference_state,
value.parent_context,
value,
args
)
return InstanceArguments(created_instance, args)
else:
if value.is_bound_method():
args = InstanceArguments(value.instance, args)
return args
for value in inference_state.infer(context, name):
value_node = value.tree_node
if compare_node == value_node:
yield create_args(value)
elif isinstance(value.parent_context, BaseFunctionExecutionContext) \
and compare_node.type == 'funcdef':
# Here we're trying to find decorators by checking the first
# parameter. It's not very generic though. Should find a better
# solution that also applies to nested decorators.
param_names = value.parent_context.get_param_names()
if len(param_names) != 1:
continue
values = param_names[0].infer()
if [v.tree_node for v in values] == [compare_node]:
# Found a decorator.
module_context = context.get_root_context()
execution_context = value.as_context(create_args(value))
potential_nodes = _get_potential_nodes(module_context, param_names[0].string_name)
for name, trailer in potential_nodes:
if value_node.start_pos < name.start_pos < value_node.end_pos:
random_context = execution_context.create_context(name)
yield from _check_name_for_execution(
inference_state,
random_context,
compare_node,
name,
trailer
)

View File

@ -0,0 +1,371 @@
"""
Filters are objects that you can use to filter names in different scopes. They
are needed for name resolution.
"""
from abc import abstractmethod
from typing import List, MutableMapping, Type
import weakref
from parso.tree import search_ancestor
from parso.python.tree import Name, UsedNamesMapping
from jedi.inference import flow_analysis
from jedi.inference.base_value import ValueSet, ValueWrapper, \
LazyValueWrapper
from jedi.parser_utils import get_cached_parent_scope, get_parso_cache_node
from jedi.inference.utils import to_list
from jedi.inference.names import TreeNameDefinition, ParamName, \
AnonymousParamName, AbstractNameDefinition, NameWrapper
_definition_name_cache: MutableMapping[UsedNamesMapping, List[Name]]
_definition_name_cache = weakref.WeakKeyDictionary()
class AbstractFilter:
_until_position = None
def _filter(self, names):
if self._until_position is not None:
return [n for n in names if n.start_pos < self._until_position]
return names
@abstractmethod
def get(self, name):
raise NotImplementedError
@abstractmethod
def values(self):
raise NotImplementedError
class FilterWrapper:
name_wrapper_class: Type[NameWrapper]
def __init__(self, wrapped_filter):
self._wrapped_filter = wrapped_filter
def wrap_names(self, names):
return [self.name_wrapper_class(name) for name in names]
def get(self, name):
return self.wrap_names(self._wrapped_filter.get(name))
def values(self):
return self.wrap_names(self._wrapped_filter.values())
def _get_definition_names(parso_cache_node, used_names, name_key):
if parso_cache_node is None:
names = used_names.get(name_key, ())
return tuple(name for name in names if name.is_definition(include_setitem=True))
try:
for_module = _definition_name_cache[parso_cache_node]
except KeyError:
for_module = _definition_name_cache[parso_cache_node] = {}
try:
return for_module[name_key]
except KeyError:
names = used_names.get(name_key, ())
result = for_module[name_key] = tuple(
name for name in names if name.is_definition(include_setitem=True)
)
return result
class _AbstractUsedNamesFilter(AbstractFilter):
name_class = TreeNameDefinition
def __init__(self, parent_context, node_context=None):
if node_context is None:
node_context = parent_context
self._node_context = node_context
self._parser_scope = node_context.tree_node
module_context = node_context.get_root_context()
# It is quite hacky that we have to use that. This is for caching
# certain things with a WeakKeyDictionary. However, parso intentionally
# uses slots (to save memory) and therefore we end up with having to
# have a weak reference to the object that caches the tree.
#
# Previously we have tried to solve this by using a weak reference onto
# used_names. However that also does not work, because it has a
# reference from the module, which itself is referenced by any node
# through parents.
path = module_context.py__file__()
if path is None:
# If the path is None, there is no guarantee that parso caches it.
self._parso_cache_node = None
else:
self._parso_cache_node = get_parso_cache_node(
module_context.inference_state.latest_grammar
if module_context.is_stub() else module_context.inference_state.grammar,
path
)
self._used_names = module_context.tree_node.get_used_names()
self.parent_context = parent_context
def get(self, name):
return self._convert_names(self._filter(
_get_definition_names(self._parso_cache_node, self._used_names, name),
))
def _convert_names(self, names):
return [self.name_class(self.parent_context, name) for name in names]
def values(self):
return self._convert_names(
name
for name_key in self._used_names
for name in self._filter(
_get_definition_names(self._parso_cache_node, self._used_names, name_key),
)
)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.parent_context)
class ParserTreeFilter(_AbstractUsedNamesFilter):
def __init__(self, parent_context, node_context=None, until_position=None,
origin_scope=None):
"""
node_context is an option to specify a second value for use cases
like the class mro where the parent class of a new name would be the
value, but for some type inference it's important to have a local
value of the other classes.
"""
super().__init__(parent_context, node_context)
self._origin_scope = origin_scope
self._until_position = until_position
def _filter(self, names):
names = super()._filter(names)
names = [n for n in names if self._is_name_reachable(n)]
return list(self._check_flows(names))
def _is_name_reachable(self, name):
parent = name.parent
if parent.type == 'trailer':
return False
base_node = parent if parent.type in ('classdef', 'funcdef') else name
return get_cached_parent_scope(self._parso_cache_node, base_node) == self._parser_scope
def _check_flows(self, names):
for name in sorted(names, key=lambda name: name.start_pos, reverse=True):
check = flow_analysis.reachability_check(
context=self._node_context,
value_scope=self._parser_scope,
node=name,
origin_scope=self._origin_scope
)
if check is not flow_analysis.UNREACHABLE:
yield name
if check is flow_analysis.REACHABLE:
break
class _FunctionExecutionFilter(ParserTreeFilter):
def __init__(self, parent_context, function_value, until_position, origin_scope):
super().__init__(
parent_context,
until_position=until_position,
origin_scope=origin_scope,
)
self._function_value = function_value
def _convert_param(self, param, name):
raise NotImplementedError
@to_list
def _convert_names(self, names):
for name in names:
param = search_ancestor(name, 'param')
# Here we don't need to check if the param is a default/annotation,
# because those are not definitions and never make it to this
# point.
if param:
yield self._convert_param(param, name)
else:
yield TreeNameDefinition(self.parent_context, name)
class FunctionExecutionFilter(_FunctionExecutionFilter):
def __init__(self, *args, arguments, **kwargs):
super().__init__(*args, **kwargs)
self._arguments = arguments
def _convert_param(self, param, name):
return ParamName(self._function_value, name, self._arguments)
class AnonymousFunctionExecutionFilter(_FunctionExecutionFilter):
def _convert_param(self, param, name):
return AnonymousParamName(self._function_value, name)
class GlobalNameFilter(_AbstractUsedNamesFilter):
def get(self, name):
try:
names = self._used_names[name]
except KeyError:
return []
return self._convert_names(self._filter(names))
@to_list
def _filter(self, names):
for name in names:
if name.parent.type == 'global_stmt':
yield name
def values(self):
return self._convert_names(
name for name_list in self._used_names.values()
for name in self._filter(name_list)
)
class DictFilter(AbstractFilter):
def __init__(self, dct):
self._dct = dct
def get(self, name):
try:
value = self._convert(name, self._dct[name])
except KeyError:
return []
else:
return list(self._filter([value]))
def values(self):
def yielder():
for item in self._dct.items():
try:
yield self._convert(*item)
except KeyError:
pass
return self._filter(yielder())
def _convert(self, name, value):
return value
def __repr__(self):
keys = ', '.join(self._dct.keys())
return '<%s: for {%s}>' % (self.__class__.__name__, keys)
class MergedFilter:
def __init__(self, *filters):
self._filters = filters
def get(self, name):
return [n for filter in self._filters for n in filter.get(name)]
def values(self):
return [n for filter in self._filters for n in filter.values()]
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, ', '.join(str(f) for f in self._filters))
class _BuiltinMappedMethod(ValueWrapper):
"""``Generator.__next__`` ``dict.values`` methods and so on."""
api_type = 'function'
def __init__(self, value, method, builtin_func):
super().__init__(builtin_func)
self._value = value
self._method = method
def py__call__(self, arguments):
# TODO add TypeError if params are given/or not correct.
return self._method(self._value, arguments)
class SpecialMethodFilter(DictFilter):
"""
A filter for methods that are defined in this module on the corresponding
classes like Generator (for __next__, etc).
"""
class SpecialMethodName(AbstractNameDefinition):
api_type = 'function'
def __init__(self, parent_context, string_name, callable_, builtin_value):
self.parent_context = parent_context
self.string_name = string_name
self._callable = callable_
self._builtin_value = builtin_value
def infer(self):
for filter in self._builtin_value.get_filters():
# We can take the first index, because on builtin methods there's
# always only going to be one name. The same is true for the
# inferred values.
for name in filter.get(self.string_name):
builtin_func = next(iter(name.infer()))
break
else:
continue
break
return ValueSet([
_BuiltinMappedMethod(self.parent_context, self._callable, builtin_func)
])
def __init__(self, value, dct, builtin_value):
super().__init__(dct)
self.value = value
self._builtin_value = builtin_value
"""
This value is what will be used to introspect the name, whereas the
other value will be used to execute the function.
We distinguish, because we have to.
"""
def _convert(self, name, value):
return self.SpecialMethodName(self.value, name, value, self._builtin_value)
class _OverwriteMeta(type):
def __init__(cls, name, bases, dct):
super().__init__(name, bases, dct)
base_dct = {}
for base_cls in reversed(cls.__bases__):
try:
base_dct.update(base_cls.overwritten_methods)
except AttributeError:
pass
for func in cls.__dict__.values():
try:
base_dct.update(func.registered_overwritten_methods)
except AttributeError:
pass
cls.overwritten_methods = base_dct
class _AttributeOverwriteMixin:
def get_filters(self, *args, **kwargs):
yield SpecialMethodFilter(self, self.overwritten_methods, self._wrapped_value)
yield from self._wrapped_value.get_filters(*args, **kwargs)
class LazyAttributeOverwrite(_AttributeOverwriteMixin, LazyValueWrapper,
metaclass=_OverwriteMeta):
def __init__(self, inference_state):
self.inference_state = inference_state
class AttributeOverwrite(_AttributeOverwriteMixin, ValueWrapper,
metaclass=_OverwriteMeta):
pass
def publish_method(method_name):
def decorator(func):
dct = func.__dict__.setdefault('registered_overwritten_methods', {})
dct[method_name] = func
return func
return decorator
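# Illustrative sketch (hypothetical names, not part of the upstream module):
# a wrapper value registers a handler with @publish_method; _OverwriteMeta
# gathers the registrations into `overwritten_methods`, and
# _AttributeOverwriteMixin serves them through a SpecialMethodFilter ahead of
# the wrapped value's own filters:
#     class FakeSequence(AttributeOverwrite):
#         @publish_method('__iter__')
#         def _iter_handler(self, arguments):
#             return ValueSet([...])  # values used when __iter__ is inferred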

Some files were not shown because too many files have changed in this diff.