mirror of
https://github.com/lilydjwg/nvchecker.git
synced 2025-03-10 06:14:02 +00:00
add a `max_page` option to github source
also update README for other minor issues.
This commit is contained in:
parent
e5d52a9762
commit
c23ef737d4
2 changed files with 15 additions and 8 deletions
13
README.rst
13
README.rst
|
@ -286,9 +286,14 @@ use_latest_release
|
||||||
includes both annotated tags and lightweight ones.
|
includes both annotated tags and lightweight ones.
|
||||||
|
|
||||||
use_max_tag
|
use_max_tag
|
||||||
Set this to ``true`` to check for the max tag on GitHub. Unlike ``use_latest_release``,
|
Set this to ``true`` to check for the max tag on GitHub. Unlike
|
||||||
this option includes both annotated tags and lightweight ones, and return the biggest one
|
``use_latest_release``, this option includes both annotated tags and
|
||||||
sorted by ``pkg_resources.parse_version``.
|
lightweight ones, and return the largest one sorted by the
|
||||||
|
``sort_version_key`` option.
|
||||||
|
|
||||||
|
max_page
|
||||||
|
How many pages do we search for the max tag? Default is 3. This works when
|
||||||
|
``use_max_tag`` is set.
|
||||||
|
|
||||||
proxy
|
proxy
|
||||||
The HTTP proxy to use. The format is ``host:port``, e.g. ``localhost:8087``.
|
The HTTP proxy to use. The format is ``host:port``, e.g. ``localhost:8087``.
|
||||||
|
@ -300,7 +305,7 @@ An environment variable ``NVCHECKER_GITHUB_TOKEN`` or a key named ``github``
|
||||||
can be set to a GitHub OAuth token in order to request more frequently than
|
can be set to a GitHub OAuth token in order to request more frequently than
|
||||||
anonymously.
|
anonymously.
|
||||||
|
|
||||||
This source supports `list options`_ when ``use_latest_release`` or ``use_max_tag`` is set.
|
This source supports `list options`_ when ``use_max_tag`` is set.
|
||||||
|
|
||||||
Check BitBucket
|
Check BitBucket
|
||||||
---------------
|
---------------
|
||||||
|
|
|
@ -62,6 +62,7 @@ async def get_version_real(name, conf, **kwargs):
|
||||||
return await max_tag(partial(
|
return await max_tag(partial(
|
||||||
session.get, headers=headers, **kwargs),
|
session.get, headers=headers, **kwargs),
|
||||||
url, name, ignored_tags, include_tags_pattern,
|
url, name, ignored_tags, include_tags_pattern,
|
||||||
|
max_page = conf.get("max_page", 3),
|
||||||
)
|
)
|
||||||
|
|
||||||
async with session.get(url, headers=headers, **kwargs) as res:
|
async with session.get(url, headers=headers, **kwargs) as res:
|
||||||
|
@ -84,11 +85,12 @@ async def get_version_real(name, conf, **kwargs):
|
||||||
return version
|
return version
|
||||||
|
|
||||||
async def max_tag(
|
async def max_tag(
|
||||||
getter, url, name, ignored_tags, include_tags_pattern,
|
getter, url, name, ignored_tags, include_tags_pattern, max_page,
|
||||||
):
|
):
|
||||||
# paging is needed
|
# paging is needed
|
||||||
|
tags = []
|
||||||
|
|
||||||
while True:
|
for _ in range(max_page):
|
||||||
async with getter(url) as res:
|
async with getter(url) as res:
|
||||||
logger.debug('X-RateLimit-Remaining',
|
logger.debug('X-RateLimit-Remaining',
|
||||||
n=res.headers.get('X-RateLimit-Remaining'))
|
n=res.headers.get('X-RateLimit-Remaining'))
|
||||||
|
@ -100,7 +102,7 @@ async def max_tag(
|
||||||
data = [x for x in data
|
data = [x for x in data
|
||||||
if re.search(include_tags_pattern, x)]
|
if re.search(include_tags_pattern, x)]
|
||||||
if data:
|
if data:
|
||||||
return data
|
tags += data
|
||||||
else:
|
else:
|
||||||
next_page_url = get_next_page_url(links)
|
next_page_url = get_next_page_url(links)
|
||||||
if not next_page_url:
|
if not next_page_url:
|
||||||
|
@ -111,7 +113,7 @@ async def max_tag(
|
||||||
logger.error('No tag found in upstream repository.',
|
logger.error('No tag found in upstream repository.',
|
||||||
name=name,
|
name=name,
|
||||||
include_tags_pattern=include_tags_pattern)
|
include_tags_pattern=include_tags_pattern)
|
||||||
return
|
return tags
|
||||||
|
|
||||||
def get_next_page_url(links):
|
def get_next_page_url(links):
|
||||||
links = links.split(', ')
|
links = links.split(', ')
|
||||||
|
|
Loading…
Add table
Reference in a new issue