Skip to content

Commit

Permalink
bump version, merge branch 'devel'
Browse files Browse the repository at this point in the history
  • Loading branch information
casperdcl committed Oct 8, 2020
2 parents 03a5d6c + ab32d02 commit 8f9e03d
Show file tree
Hide file tree
Showing 13 changed files with 106 additions and 84 deletions.
4 changes: 3 additions & 1 deletion .github/workflows/check.yml
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,9 @@ jobs:
name: '${{ matrix.TOXENV }}'
strategy:
matrix:
TOXENV: [flake8, setup.py, perf]
TOXENV:
- flake8,setup.py
- perf
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
Expand Down
3 changes: 2 additions & 1 deletion .meta/.readme.rst
Original file line number Diff line number Diff line change
Expand Up @@ -583,7 +583,7 @@ However, it's best to check if `tqdm` can work without manual `position` first.
from time import sleep
from tqdm import trange, tqdm
from multiprocessing import Pool, freeze_support
from multiprocessing import Pool, RLock, freeze_support
L = list(range(9))
Expand All @@ -596,6 +596,7 @@ However, it's best to check if `tqdm` can work without manual `position` first.
if __name__ == '__main__':
freeze_support() # for Windows support
tqdm.set_lock(RLock()) # for managing output contention
p = Pool(initializer=tqdm.set_lock, initargs=(tqdm.get_lock(),))
p.map(progresser, L)
Expand Down
4 changes: 2 additions & 2 deletions .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -54,10 +54,10 @@ jobs:
python: 3.7
env: TOXENV=tf-no-keras
- name: pypy2.7
python: pypy2.7-7.1.1
python: pypy2.7-7.3.1
env: TOXENV=pypy
- name: pypy3.5
python: pypy3.6-7.1.1
python: pypy3.6-7.3.1
env: TOXENV=pypy3
- stage: development
name: py2.7-win
Expand Down
3 changes: 2 additions & 1 deletion DEMO.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -612,7 +612,7 @@
"```python\n",
"from time import sleep\n",
"from tqdm import trange, tqdm\n",
"from multiprocessing import Pool, freeze_support\n",
"from multiprocessing import Pool, RLock, freeze_support\n",
"\n",
"L = list(range(9))\n",
"\n",
Expand All @@ -625,6 +625,7 @@
"\n",
"if __name__ == '__main__':\n",
" freeze_support() # for Windows support\n",
" tqdm.set_lock(RLock()) # for managing output contention\n",
" p = Pool(initializer=tqdm.set_lock, initargs=(tqdm.get_lock(),))\n",
" p.map(progresser, L)\n",
"```\n",
Expand Down
11 changes: 6 additions & 5 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ test:
tox -e perf

testnose:
nosetests tqdm -d -v
nosetests -d -v tqdm

testsetup:
@make README.rst
Expand All @@ -62,14 +62,14 @@ testsetup:

testcoverage:
@make coverclean
nosetests tqdm --with-coverage --cover-package=tqdm --cover-erase --cover-min-percentage=80 --ignore-files="tests_perf\.py" -d -v
nosetests -d -v tqdm --ignore-files="tests_perf\.py" --with-coverage --cover-package=tqdm --cover-erase --cover-min-percentage=80

testperf:
# do not use coverage (which is extremely slow)
nosetests tqdm/tests/tests_perf.py -d -v
nosetests -d -v tqdm/tests/tests_perf.py

testtimer:
nosetests tqdm --with-timer -d -v
nosetests -d -v tqdm --with-timer

# another performance test, to check evolution across commits
testasv:
Expand Down Expand Up @@ -121,7 +121,8 @@ pre-commit:
# quick sanity checks
@make testsetup
flake8 -j 8 --count --statistics tqdm/ examples/
nosetests tqdm --ignore-files="tests_(perf|keras)\.py" -e "pandas|monitoring" -d
nosetests -d tqdm --ignore-files="tests_(perf|keras)\.py" -e "pandas|monitoring"
nosetests -d tqdm/tests/tests_perf.py -m basic_overhead
prebuildclean:
@+python -c "import shutil; shutil.rmtree('build', True)"
@+python -c "import shutil; shutil.rmtree('dist', True)"
Expand Down
3 changes: 2 additions & 1 deletion README.rst
Original file line number Diff line number Diff line change
Expand Up @@ -798,7 +798,7 @@ However, it's best to check if `tqdm` can work without manual `position` first.
from time import sleep
from tqdm import trange, tqdm
from multiprocessing import Pool, freeze_support
from multiprocessing import Pool, RLock, freeze_support
L = list(range(9))
Expand All @@ -811,6 +811,7 @@ However, it's best to check if `tqdm` can work without manual `position` first.
if __name__ == '__main__':
freeze_support() # for Windows support
tqdm.set_lock(RLock()) # for managing output contention
p = Pool(initializer=tqdm.set_lock, initargs=(tqdm.get_lock(),))
p.map(progresser, L)
Expand Down
29 changes: 14 additions & 15 deletions examples/parallel_bars.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,15 @@
from __future__ import print_function
from time import sleep
from tqdm.auto import tqdm, trange
from tqdm.contrib.concurrent import process_map, thread_map
from random import random
from multiprocessing import Pool, freeze_support
from concurrent.futures import ThreadPoolExecutor
from threading import RLock
from functools import partial
from multiprocessing import Pool, RLock, freeze_support
from random import random
from threading import RLock as TRLock
from time import sleep
import sys

from tqdm.auto import tqdm, trange
from tqdm.contrib.concurrent import process_map, thread_map

NUM_SUBITERS = 9
PY2 = sys.version_info[:1] <= (2,)

Expand Down Expand Up @@ -47,17 +48,15 @@ def progresser(n, auto_position=True, write_safe=False, blocking=True,
sleep(0.01)

print("Multi-processing")
tqdm.set_lock(RLock())
p = Pool(initializer=tqdm.set_lock, initargs=(tqdm.get_lock(),))
p.map(partial(progresser, progress=True), L)

# unfortunately need ncols
# to print spaces over leftover multi-processing bars (#796)
with tqdm(leave=False) as t:
ncols = t.ncols or 80
print(("{msg:<{ncols}}").format(msg="Multi-threading", ncols=ncols))

# explicitly set just threading lock for nonblocking progress
tqdm.set_lock(RLock())
with ThreadPoolExecutor() as p:
print("Multi-threading")
tqdm.set_lock(TRLock())
pool_args = {}
if not PY2:
pool_args.update(initializer=tqdm.set_lock, initargs=(tqdm.get_lock(),))
with ThreadPoolExecutor(**pool_args) as p:
p.map(partial(progresser, progress=True, write_safe=not PY2,
blocking=False), L)
8 changes: 4 additions & 4 deletions tox.ini
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ deps =
coverage
coveralls
commands =
nosetests --with-coverage --cover-package=tqdm --ignore-files="tests_perf\.py" -d -v tqdm/
nosetests -d -v --with-coverage --cover-package=tqdm --ignore-files="tests_perf\.py" tqdm/
- coveralls
- coverage xml
- curl -OL https://coverage.codacy.com/get.sh
Expand All @@ -28,7 +28,7 @@ deps =
nose-timer
codecov
commands =
nosetests --with-coverage --with-timer --cover-package=tqdm --ignore-files="tests_perf\.py" -d -v tqdm/
nosetests -d -v --with-coverage --with-timer --cover-package=tqdm --ignore-files="tests_perf\.py" tqdm/
- coveralls
codecov
- coverage xml
Expand Down Expand Up @@ -81,14 +81,14 @@ deps =
{[extra]deps}
tensorflow
commands =
nosetests --with-timer tqdm/tests/tests_keras.py -d -v
nosetests -d -v --with-timer tqdm/tests/tests_keras.py

[testenv:perf]
deps =
nose
nose-timer
commands =
nosetests --with-timer tqdm/tests/tests_perf.py -d -v
nosetests -d -v --with-timer tqdm/tests/tests_perf.py

[testenv:flake8]
deps = flake8
Expand Down
2 changes: 1 addition & 1 deletion tqdm/_version.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
__all__ = ["__version__"]

# major, minor, patch, -extra
version_info = 4, 50, 1
version_info = 4, 50, 2

# Nice string for the version
__version__ = '.'.join(map(str, version_info))
Expand Down
54 changes: 38 additions & 16 deletions tqdm/contrib/concurrent.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
from __future__ import absolute_import
from tqdm import TqdmWarning
from tqdm.auto import tqdm as tqdm_auto
from contextlib import contextmanager
try:
from operator import length_hint
except ImportError:
Expand All @@ -26,6 +27,20 @@ def cpu_count():
__all__ = ['thread_map', 'process_map']


@contextmanager
def ensure_lock(tqdm_class, lock_name=""):
    """
    Get (create if necessary) `tqdm_class`'s lock, yield it, then restore
    the class's previous lock state on exit.

    Parameters
    ----------
    tqdm_class  : class
        A tqdm-like class exposing `get_lock`/`set_lock` classmethods and
        storing its lock in the `_lock` class attribute.
    lock_name  : str, optional
        If the lock object has an attribute of this name (e.g. "mp_lock"),
        that sub-lock is used instead of the lock itself [default: ""].

    Yields
    ------
    The (possibly newly created) lock now installed on `tqdm_class`.
    """
    old_lock = getattr(tqdm_class, '_lock', None)  # don't create a new lock
    lock = old_lock or tqdm_class.get_lock()  # maybe create a new lock
    lock = getattr(lock, lock_name, lock)  # maybe subtype
    tqdm_class.set_lock(lock)
    try:
        yield lock
    finally:
        # restore previous state even if the managed block raises,
        # otherwise a stale temporary lock would leak onto the class
        if old_lock is None:
            del tqdm_class._lock
        else:
            tqdm_class.set_lock(old_lock)


def _executor_map(PoolExecutor, fn, *iterables, **tqdm_kwargs):
"""
Implementation of `thread_map` and `process_map`.
Expand All @@ -35,25 +50,27 @@ def _executor_map(PoolExecutor, fn, *iterables, **tqdm_kwargs):
tqdm_class : [default: tqdm.auto.tqdm].
max_workers : [default: min(32, cpu_count() + 4)].
chunksize : [default: 1].
lock_name : [default: "":str].
"""
kwargs = tqdm_kwargs.copy()
if "total" not in kwargs:
kwargs["total"] = len(iterables[0])
tqdm_class = kwargs.pop("tqdm_class", tqdm_auto)
max_workers = kwargs.pop("max_workers", min(32, cpu_count() + 4))
chunksize = kwargs.pop("chunksize", 1)
pool_kwargs = dict(max_workers=max_workers)
sys_version = sys.version_info[:2]
if sys_version >= (3, 7):
# share lock in case workers are already using `tqdm`
pool_kwargs.update(
initializer=tqdm_class.set_lock, initargs=(tqdm_class.get_lock(),))
map_args = {}
if not (3, 0) < sys_version < (3, 5):
map_args.update(chunksize=chunksize)
with PoolExecutor(**pool_kwargs) as ex:
return list(tqdm_class(
ex.map(fn, *iterables, **map_args), **kwargs))
lock_name = kwargs.pop("lock_name", "")
with ensure_lock(tqdm_class, lock_name=lock_name) as lk:
pool_kwargs = dict(max_workers=max_workers)
sys_version = sys.version_info[:2]
if sys_version >= (3, 7):
# share lock in case workers are already using `tqdm`
pool_kwargs.update(initializer=tqdm_class.set_lock, initargs=(lk,))
map_args = {}
if not (3, 0) < sys_version < (3, 5):
map_args.update(chunksize=chunksize)
with PoolExecutor(**pool_kwargs) as ex:
return list(tqdm_class(
ex.map(fn, *iterables, **map_args), **kwargs))


def thread_map(fn, *iterables, **tqdm_kwargs):
Expand All @@ -63,9 +80,9 @@ def thread_map(fn, *iterables, **tqdm_kwargs):
Parameters
----------
tqdm_class : optional
tqdm_class : optional
`tqdm` class to use for bars [default: tqdm.auto.tqdm].
max_workers : int, optional
max_workers : int, optional
Maximum number of workers to spawn; passed to
`concurrent.futures.ThreadPoolExecutor.__init__`.
[default: max(32, cpu_count() + 4)].
Expand All @@ -83,13 +100,15 @@ def process_map(fn, *iterables, **tqdm_kwargs):
----------
tqdm_class : optional
`tqdm` class to use for bars [default: tqdm.auto.tqdm].
max_workers : int, optional
max_workers : int, optional
Maximum number of workers to spawn; passed to
`concurrent.futures.ProcessPoolExecutor.__init__`.
[default: min(32, cpu_count() + 4)].
chunksize : int, optional
chunksize : int, optional
Size of chunks sent to worker processes; passed to
`concurrent.futures.ProcessPoolExecutor.map`. [default: 1].
lock_name : str, optional
Member of `tqdm_class.get_lock()` to use [default: mp_lock].
"""
from concurrent.futures import ProcessPoolExecutor
if iterables and "chunksize" not in tqdm_kwargs:
Expand All @@ -102,4 +121,7 @@ def process_map(fn, *iterables, **tqdm_kwargs):
" This may seriously degrade multiprocess performance."
" Set `chunksize=1` or more." % longest_iterable_len,
TqdmWarning, stacklevel=2)
if "lock_name" not in tqdm_kwargs:
tqdm_kwargs = tqdm_kwargs.copy()
tqdm_kwargs["lock_name"] = "mp_lock"
return _executor_map(ProcessPoolExecutor, fn, *iterables, **tqdm_kwargs)

0 comments on commit 8f9e03d

Please sign in to comment.