aboutsummaryrefslogtreecommitdiffstats
path: root/pygithub3/core
diff options
context:
space:
mode:
Diffstat (limited to 'pygithub3/core')
-rw-r--r--pygithub3/core/result.py207
-rw-r--r--pygithub3/core/result/__init__.py0
-rw-r--r--pygithub3/core/result/base.py109
-rw-r--r--pygithub3/core/result/link.py (renamed from pygithub3/core/link.py)3
-rw-r--r--pygithub3/core/result/normal.py115
-rw-r--r--pygithub3/core/result/smart.py110
6 files changed, 336 insertions, 208 deletions
diff --git a/pygithub3/core/result.py b/pygithub3/core/result.py
deleted file mode 100644
index c2928d2..0000000
--- a/pygithub3/core/result.py
+++ /dev/null
@@ -1,207 +0,0 @@
-#!/usr/bin/env python
-# -*- encoding: utf-8 -*-
-
-import functools
-
-from .link import Link
-
-
-class Method(object):
- """ Lazy support """
-
- def __init__(self, method, request, **method_args):
- self.method = functools.partial(method, request, **method_args)
- self.resource = request.resource
- self.cache = {}
-
- def cached(func):
- """ Decorator to don't do a request if it's cached """
- def wrapper(self, page=1):
- if str(page) in self.cache:
- return self.cache[str(page)]
- return func(self, page)
- return wrapper
-
- def if_needs_lastpage(func):
- """ Decorator to set last page only if it can and it hasn't retrieved
- before """
- def wrapper(self, has_link):
- has_last_page = hasattr(self, 'last_page')
- if not has_last_page and has_link:
- return func(self, has_link)
- elif not has_last_page and not has_link:
- self.last_page = 1
- return wrapper
-
- @if_needs_lastpage
- def __set_last_page_from(self, link_header):
- """ Get and set last_page form link header """
- link = Link(link_header)
- self.last_page = int(link.last.params.get('page'))
-
- @cached
- def __call__(self, page=1):
- """ Call a real request """
- response = self.method(page=page)
- self.__set_last_page_from(response.headers.get('link'))
- self.cache[str(page)] = self.resource.loads(response.content)
- return self.cache[str(page)]
-
- @property
- def last(self):
- if not hasattr(self, 'last_page'):
- self()
- return self.last_page
-
-
-class Page(object):
- """ Iterator of resources """
-
- def __init__(self, getter, page=1):
- self.getter = getter
- self.page = page
-
- def __iter__(self):
- return self
-
- def __add__(self, number):
- return self.page + number
-
- def __radd__(self, number):
- return number + self.page
-
- def __sub__(self, number):
- return self.page - number
-
- def __rsub__(self, number):
- return number - self.page
-
- def __lt__(self, number):
- return self.page < number
-
- def __le__(self, number):
- return self.page <= number
-
- def __eq__(self, number):
- return self.page == number
-
- def __ne__(self, number):
- return self.page != number
-
- def __gt__(self, number):
- return self.page > number
-
- def __ge__(self, number):
- return self.page >= number
-
- @property
- def resources(self):
- return getattr(self, '_count', None) or u"~"
-
- def get_content(func):
- def wrapper(self):
- if not hasattr(self, '_count'):
- content = self.getter(self.page)
- self._count = len(content)
- self.iterable = iter(content)
- return func(self)
- return wrapper
-
- @get_content
- def __next__(self):
- try:
- return self.iterable.next()
- except StopIteration:
- self.iterable = iter(self.getter(self.page))
- raise StopIteration
-
- def next(self):
- return self.__next__()
-
- def __str__(self):
- return '<{name}{page} resources={resources}>'.format(
- name=self.__class__.__name__,
- page=self.page,
- resources=self.resources)
-
- def __repr__(self):
- return "%s[%d]" % (self.__str__(), id(self))
-
-
-class Result(object):
- """
- Result is a very **lazy** paginator beacuse only do a real request when is
- needed, besides it's **cached**, so never repeats a request.
-
- You have several ways to consume it
-
- #. Iterating over the result::
-
- result = some_request()
- for page in result:
- for resource in page:
- print resource
-
- #. With a generator::
-
- result = some_request()
- for resource in result.iterator():
- print resource
-
- #. As a list::
-
- result = some_request()
- print result.all()
-
- #. Also you can request some page manually
-
- .. autoattribute:: pygithub3.core.result.Result.pages
- .. automethod:: pygithub3.core.result.Result.get_page
-
- Each ``Page`` is an iterator and contains resources::
-
- result = some_request()
- assert result.pages > 3
- page3 = result.get_page(3)
- page3_resources = list(page3)
- """
-
- def __init__(self, client, request, **kwargs):
- self.getter = Method(client.get, request, **kwargs)
- self.page = Page(self.getter)
-
- def __iter__(self):
- return self
-
- def __next__(self):
- if self.page <= self.pages:
- page_to_return = self.page
- self.page = Page(self.getter, page_to_return + 1)
- return page_to_return
- self.page = Page(self.getter)
- raise StopIteration
-
- def next(self):
- return self.__next__()
-
- @property
- def pages(self):
- """ Total number of pages in request """
- return self.getter.last
-
- def get_page(self, page):
- """ Get ``Page`` of resources
-
- :param int page: Page number
- """
- if page in xrange(1, self.pages + 1):
- return Page(self.getter, page)
- return None
-
- def iterator(self):
- for page in self:
- for resource in page:
- yield resource
-
- def all(self):
- return list(self.iterator())
diff --git a/pygithub3/core/result/__init__.py b/pygithub3/core/result/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/pygithub3/core/result/__init__.py
diff --git a/pygithub3/core/result/base.py b/pygithub3/core/result/base.py
new file mode 100644
index 0000000..b33f97e
--- /dev/null
+++ b/pygithub3/core/result/base.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python
+# -*- encoding: utf-8 -*-
+
+import functools
+
+
+class Method(object):
+ """ It wraps the requester method, adding behaviour for building results """
+
+ def __init__(self, method, request, **method_args):
+ self.method = functools.partial(method, request, **method_args)
+ self.resource = request.resource
+ self.cache = {}
+
+ def __call__(self):
+ raise NotImplementedError
+
+
+class Page(object):
+ """ Iterator of resources """
+
+ def __init__(self, getter, page=1):
+ self.getter = getter
+ self.page = page
+
+ def __iter__(self):
+ return self
+
+ def __add__(self, number):
+ return self.page + number
+
+ def __radd__(self, number):
+ return number + self.page
+
+ def __sub__(self, number):
+ return self.page - number
+
+ def __rsub__(self, number):
+ return number - self.page
+
+ def __lt__(self, number):
+ return self.page < number
+
+ def __le__(self, number):
+ return self.page <= number
+
+ def __eq__(self, number):
+ return self.page == number
+
+ def __ne__(self, number):
+ return self.page != number
+
+ def __gt__(self, number):
+ return self.page > number
+
+ def __ge__(self, number):
+ return self.page >= number
+
+ @property
+ def resources(self):
+ return getattr(self, 'count', None) or '~'
+
+ def get_content(func):
+ def wrapper(self):
+ if not hasattr(self, 'count'):
+ content = self.getter(self.page)
+ self.count = len(content)
+ self.iterable = iter(content)
+ return func(self)
+ return wrapper
+
+ @get_content
+ def __next__(self):
+ try:
+ return self.iterable.next()
+ except StopIteration:
+ self.iterable = iter(self.getter(self.page))
+ raise StopIteration
+
+ def next(self):
+ return self.__next__()
+
+ def __str__(self):
+ return '<{name}{page} resources={resources}>'.format(
+ name=self.__class__.__name__,
+ page=self.page,
+ resources=self.resources)
+
+
+class Result(object):
+ """ Iterator of pages """
+
+ def __init__(self, method):
+ self.getter = method
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ return self.__next__()
+
+ def iterator(self):
+ """ generator """
+ for page in self:
+ for resource in page:
+ yield resource
+
+ def all(self):
+ return list(self.iterator())
diff --git a/pygithub3/core/link.py b/pygithub3/core/result/link.py
index 1d6be2c..b6a614f 100644
--- a/pygithub3/core/link.py
+++ b/pygithub3/core/result/link.py
@@ -3,7 +3,8 @@
from urlparse import urlparse, parse_qs
-from .third_libs.link_header import parse_link_value
+from pygithub3.core.third_libs.link_header import parse_link_value
+
class Link(str):
diff --git a/pygithub3/core/result/normal.py b/pygithub3/core/result/normal.py
new file mode 100644
index 0000000..c38a915
--- /dev/null
+++ b/pygithub3/core/result/normal.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python
+# -*- encoding: utf-8 -*-
+
+from . import base
+from .link import Link
+
+
+class Method(base.Method):
+ """ Cache support and builds next request """
+
+ def __init__(self, *args, **kwargs):
+ super(Method, self).__init__(*args, **kwargs)
+ self.next = True
+
+ def cached(func):
+ def wrapper(self, page=1):
+ if str(page) in self.cache:
+ return self.cache[str(page)]['content']
+ return func(self, page)
+ return wrapper
+
+ def next_getter_from(self, response):
+ link = Link(response.headers.get('link'))
+ if hasattr(link, 'next'):
+ return base.functools.partial(self.method, **link.next.params)
+ self.next = False
+
+ @cached
+ def __call__(self, page=1):
+ prev = self.cache.get(str(page - 1))
+ method = prev and prev['next'] or self.method
+ response = method()
+ self.cache[str(page)] = {
+ 'content': self.resource.loads(response.content),
+ 'next': self.next_getter_from(response)
+ }
+ return self.cache[str(page)]['content']
+
+
+class Page(base.Page):
+ """ Consumed on instantiation """
+
+ def __init__(self, getter, page=1):
+ super(Page, self).__init__(getter, page)
+ content = getter(page)
+ self.iterable = iter(content)
+ self.count = len(content)
+
+
+class Result(base.Result):
+ """
+ It's a middle-lazy iterator, because to get a new page it needs to
+ make a real request; besides, it's **cached**, so it never repeats a request.
+
+ You have several ways to consume it
+
+ #. Iterating over the result::
+
+ result = some_request()
+ for page in result:
+ for resource in page:
+ print resource
+
+ #. With a generator::
+
+ result = some_request()
+ for resource in result.iterator():
+ print resource
+
+ #. As a list::
+
+ result = some_request()
+ print result.all()
+
+ """
+
+ """ TODO: limit in {all/iterator}
+ .. note::
+ You can use ``limit`` with `all` and `iterator`
+ ::
+ result = some_request()
+ _5resources = result.all(limit=5)
+
+ This exists because it can't request an explicit page, and some requests
+ can have thousand of resources (e.g Repository's commits)
+ """
+
+ def __init__(self, method):
+ super(Result, self).__init__(method)
+ self.counter = 0
+ self.cached = False
+
+ def get_cached(func):
+ def wrapper(self):
+ if self.cached:
+ if str(self.counter) in self.getter.cache:
+ page = Page(self.getter, self.counter)
+ self.counter += 1
+ return page
+ self._reset()
+ raise StopIteration
+ return func(self)
+ return wrapper
+
+ @get_cached
+ def __next__(self):
+ if self.getter.next:
+ self.counter += 1
+ return Page(self.getter, self.counter)
+ self._reset()
+ raise StopIteration
+
+ def _reset(self):
+ self.counter = 1
+ self.cached = True
diff --git a/pygithub3/core/result/smart.py b/pygithub3/core/result/smart.py
new file mode 100644
index 0000000..0343a9b
--- /dev/null
+++ b/pygithub3/core/result/smart.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+# -*- encoding: utf-8 -*-
+
+from . import base
+from .link import Link
+
+
+class Method(base.Method):
+ """ Lazy and cache support """
+
+ def cached(func):
+ """ Decorator to avoid a request when the page is already cached """
+ def wrapper(self, page=1):
+ if str(page) in self.cache:
+ return self.cache[str(page)]
+ return func(self, page)
+ return wrapper
+
+ def if_needs_lastpage(func):
+ """ Decorator to set the last page only if possible and it hasn't been
+ retrieved before """
+ def wrapper(self, has_link):
+ has_last_page = hasattr(self, 'last_page')
+ if not has_last_page and has_link:
+ return func(self, has_link)
+ elif not has_last_page and not has_link:
+ self.last_page = 1
+ return wrapper
+
+ @if_needs_lastpage
+ def __set_last_page_from(self, link_header):
+ """ Get and set last_page from the link header """
+ link = Link(link_header)
+ self.last_page = int(link.last.params.get('page'))
+
+ @cached
+ def __call__(self, page=1):
+ """ Call a real request """
+ response = self.method(page=page)
+ self.__set_last_page_from(response.headers.get('link'))
+ self.cache[str(page)] = self.resource.loads(response.content)
+ return self.cache[str(page)]
+
+ @property
+ def last(self):
+ if not hasattr(self, 'last_page'):
+ self()
+ return self.last_page
+
+
+class Result(base.Result):
+ """
+ It's a very **lazy** paginator because it only does a real request
+ when needed; besides, it's **cached**, so it never repeats a request.
+
+ You have several ways to consume it
+
+ #. Iterating over the result::
+
+ result = some_request()
+ for page in result:
+ for resource in page:
+ print resource
+
+ #. With a generator::
+
+ result = some_request()
+ for resource in result.iterator():
+ print resource
+
+ #. As a list::
+
+ result = some_request()
+ print result.all()
+
+ #. Also you can request some page manually
+
+ Each ``Page`` is an iterator and contains resources::
+
+ result = some_request()
+ assert result.pages > 3
+ page3 = result.get_page(3)
+ page3_resources = list(page3)
+ """
+
+ def __init__(self, method):
+ super(Result, self).__init__(method)
+ self.page = base.Page(self.getter)
+
+ def __next__(self):
+ if self.page <= self.pages:
+ page_to_return = self.page
+ self.page = base.Page(self.getter, page_to_return + 1)
+ return page_to_return
+ self.page = base.Page(self.getter)
+ raise StopIteration
+
+ @property
+ def pages(self):
+ """ Total number of pages in request """
+ return self.getter.last
+
+ def get_page(self, page):
+ """ Get ``Page`` of resources
+
+ :param int page: Page number
+ """
+ if page in xrange(1, self.pages + 1):
+ return base.Page(self.getter, page)
+ return None