#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""Memcache API.

Provides a memcached-like API for application developers to store
data in memory when reliable storage via the DataStore API isn't
required and higher performance is desired.
"""



import cStringIO
import math
import pickle
import types
import sha

from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import namespace_manager
from google.appengine.api.memcache import memcache_service_pb
from google.appengine.runtime import apiproxy_errors

MemcacheSetResponse = memcache_service_pb.MemcacheSetResponse
MemcacheSetRequest = memcache_service_pb.MemcacheSetRequest

MemcacheGetResponse = memcache_service_pb.MemcacheGetResponse
MemcacheGetRequest = memcache_service_pb.MemcacheGetRequest

MemcacheDeleteResponse = memcache_service_pb.MemcacheDeleteResponse
MemcacheDeleteRequest = memcache_service_pb.MemcacheDeleteRequest

MemcacheIncrementResponse = memcache_service_pb.MemcacheIncrementResponse
MemcacheIncrementRequest = memcache_service_pb.MemcacheIncrementRequest

MemcacheFlushResponse = memcache_service_pb.MemcacheFlushResponse
MemcacheFlushRequest = memcache_service_pb.MemcacheFlushRequest

MemcacheStatsRequest = memcache_service_pb.MemcacheStatsRequest
MemcacheStatsResponse = memcache_service_pb.MemcacheStatsResponse

DELETE_NETWORK_FAILURE = 0
DELETE_ITEM_MISSING = 1
DELETE_SUCCESSFUL = 2

MAX_KEY_SIZE = 250
MAX_VALUE_SIZE = 10 ** 6

STAT_HITS = 'hits'
STAT_MISSES = 'misses'
STAT_BYTE_HITS = 'byte_hits'
STAT_ITEMS = 'items'
STAT_BYTES = 'bytes'
STAT_OLDEST_ITEM_AGES = 'oldest_item_age'

FLAG_TYPE_MASK = 7
FLAG_COMPRESSED = 1 << 3

TYPE_STR = 0
TYPE_UNICODE = 1
TYPE_PICKLED = 2
TYPE_INT = 3
TYPE_LONG = 4
TYPE_BOOL = 5


def _key_string(key, key_prefix='', server_to_user_dict=None):
  """Utility function to handle different ways of requesting keys.

  Args:
    key: Either a string or tuple of (shard_number, string).  In Google App
      Engine the sharding is automatic so the shard number is ignored.
      To memcache, the key is just bytes (no defined encoding).
    key_prefix: Optional string prefix to prepend to key.
    server_to_user_dict: Optional dictionary to populate with a mapping of
      server-side key (which includes the key_prefix) to user-supplied key
      (which does not have the prefix).

  Returns:
    The key as a non-unicode string prepended with key_prefix. This is
    the key sent to and stored by the server. If the resulting key is
    longer than MAX_KEY_SIZE, it will be hashed with sha1 and replaced
    with the hex representation of that hash.

  Raises:
    TypeError: If provided key isn't a string or tuple of (int, string)
      or key_prefix or server_to_user_dict are of the wrong type.
  """
  if type(key) is types.TupleType:
    key = key[1]
  if not isinstance(key, basestring):
    raise TypeError('Key must be a string instance, received %r' % key)
  if not isinstance(key_prefix, basestring):
    raise TypeError('key_prefix must be a string instance, received %r' %
                    key_prefix)

  server_key = key_prefix + key
  if isinstance(server_key, unicode):
    server_key = server_key.encode('utf-8')

  if len(server_key) > MAX_KEY_SIZE:
    server_key = sha.new(server_key).hexdigest()

  if server_to_user_dict is not None:
    if not isinstance(server_to_user_dict, dict):
      raise TypeError('server_to_user_dict must be a dict instance, '
                      'received %r' % server_to_user_dict)
    server_to_user_dict[server_key] = key

  return server_key
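
# Illustrative sketch of _key_string behaviour (module-private helper; the
# values below are examples, not an exhaustive specification):
#
#   _key_string('spam')                        -> 'spam'
#   _key_string((3, 'spam'), key_prefix='p:')  -> 'p:spam'
#                                                 (the shard number is ignored)
#   _key_string(u'k' * 300)                    -> 40-char hex SHA-1 digest,
#                                                 since the encoded key exceeds
#                                                 MAX_KEY_SIZE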


def _validate_encode_value(value, do_pickle):
  """Utility function to validate and encode server keys and values.

  Args:
    value: Value to store in memcache. If it's a string, it will get passed
      along as-is. If it's a unicode string, it will be marked appropriately,
      such that retrievals will yield a unicode value. If it's any other data
      type, this function will attempt to pickle the data and then store the
      serialized result, unpickling it upon retrieval.
    do_pickle: Callable that takes an object and returns a non-unicode
      string containing the pickled object.

  Returns:
    Tuple (stored_value, flags) where:
      stored_value: The value as a non-unicode string that should be stored
        in memcache.
      flags: An integer with bits set from the FLAG_* constants in this file
        to indicate the encoding of the key and value.

  Raises:
    ValueError: If the encoded value is too large.
    pickle.PicklingError: If the value is not a string and could not be pickled.
    RuntimeError: If a complicated data structure could not be pickled due to
      too many levels of recursion in its composition.
  """
  flags = 0
  stored_value = value

  if isinstance(value, str):
    pass
  elif isinstance(value, unicode):
    stored_value = value.encode('utf-8')
    flags |= TYPE_UNICODE
  elif isinstance(value, bool):
    stored_value = str(int(value))
    flags |= TYPE_BOOL
  elif isinstance(value, int):
    stored_value = str(value)
    flags |= TYPE_INT
  elif isinstance(value, long):
    stored_value = str(value)
    flags |= TYPE_LONG
  else:
    stored_value = do_pickle(value)
    flags |= TYPE_PICKLED


  if len(stored_value) > MAX_VALUE_SIZE:
    raise ValueError('Values may not be more than %d bytes in length; '
                     'received %d bytes' % (MAX_VALUE_SIZE, len(stored_value)))

  return (stored_value, flags)
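
# Illustrative mapping of values to (stored_value, flags); pickle.dumps stands
# in for the do_pickle callable purely for this sketch:
#
#   _validate_encode_value('abc', pickle.dumps)    -> ('abc', TYPE_STR)
#   _validate_encode_value(u'abc', pickle.dumps)   -> ('abc', TYPE_UNICODE)
#   _validate_encode_value(True, pickle.dumps)     -> ('1', TYPE_BOOL)
#   _validate_encode_value(42, pickle.dumps)       -> ('42', TYPE_INT)
#   _validate_encode_value({'a': 1}, pickle.dumps) -> (pickled bytes,
#                                                      TYPE_PICKLED)
#
# _decode_value() below reverses this mapping using flags & FLAG_TYPE_MASK.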


def _decode_value(stored_value, flags, do_unpickle):
  """Utility function for decoding values retrieved from memcache.

  Args:
    stored_value: The value as a non-unicode string that was stored.
    flags: An integer with bits set from the FLAG_* constants in this file
      that indicate the encoding of the key and value.
    do_unpickle: Callable that takes a non-unicode string object that contains
      a pickled object and returns the pickled object.

  Returns:
    The original object that was stored, be it a normal string, a unicode
    string, int, long, or a Python object that was pickled.

  Raises:
    pickle.UnpicklingError: If the value could not be unpickled.
  """
  assert isinstance(stored_value, str)
  assert isinstance(flags, (int, long))

  type_number = flags & FLAG_TYPE_MASK
  value = stored_value


  if type_number == TYPE_STR:
    return value
  elif type_number == TYPE_UNICODE:
    return value.decode('utf-8')
  elif type_number == TYPE_PICKLED:
    return do_unpickle(value)
  elif type_number == TYPE_BOOL:
    return bool(int(value))
  elif type_number == TYPE_INT:
    return int(value)
  elif type_number == TYPE_LONG:
    return long(value)
  else:
    assert False, "Unknown stored type"
  assert False, "Shouldn't get here."

class Client(object):
  """Memcache client object, through which one invokes all memcache operations.

  Several methods are no-ops to retain source-level compatibility
  with the existing popular Python memcache library.

  Any method that takes a 'key' argument will accept that key as a string
  (unicode or not) or a tuple of (hash_value, string) where the hash_value,
  normally used for sharding onto a memcache instance, is instead ignored, as
  Google App Engine deals with the sharding transparently. Keys in memcache are
  just bytes, without a specified encoding. All such methods may raise TypeError
  if provided a bogus key value and a ValueError if the key is too large.

  Any method that takes a 'value' argument will accept as that value any
  string (unicode or not), int, long, or pickle-able Python object, including
  all native types.  You'll get back from the cache the same type that you
  originally put in.
  """

  def __init__(self, servers=None, debug=0,
               pickleProtocol=pickle.HIGHEST_PROTOCOL,
               pickler=pickle.Pickler,
               unpickler=pickle.Unpickler,
               pload=None,
               pid=None,
               make_sync_call=apiproxy_stub_map.MakeSyncCall):
    """Create a new Client object.

    No parameters are required.

    Args:
      servers: Ignored; only for compatibility.
      debug: Ignored; only for compatibility.
      pickleProtocol: Pickle protocol to use for pickling the object.
      pickler: pickle.Pickler sub-class to use for pickling.
      unpickler: pickle.Unpickler sub-class to use for unpickling.
      pload: Callable to use for retrieving objects by persistent id.
      pid: Callable to use to determine the persistent id for objects, if any.
      make_sync_call: Function to use to make an App Engine service call.
        Used for testing.
    """
    self._pickle_data = cStringIO.StringIO()
    self._pickler_instance = pickler(self._pickle_data,
                                     protocol=pickleProtocol)
    self._unpickler_instance = unpickler(self._pickle_data)
    if pid is not None:
      self._pickler_instance.persistent_id = pid
    if pload is not None:
      self._unpickler_instance.persistent_load = pload

    def DoPickle(value):
      self._pickle_data.truncate(0)
      self._pickler_instance.clear_memo()
      self._pickler_instance.dump(value)
      return self._pickle_data.getvalue()
    self._do_pickle = DoPickle

    def DoUnpickle(value):
      self._pickle_data.truncate(0)
      self._pickle_data.write(value)
      self._pickle_data.seek(0)
      self._unpickler_instance.memo.clear()
      return self._unpickler_instance.load()
    self._do_unpickle = DoUnpickle

    self._make_sync_call = make_sync_call

  def set_servers(self, servers):
    """Sets the pool of memcache servers used by the client.

    This is purely a compatibility method.  In Google App Engine, it's a no-op.
    """
    pass

  def disconnect_all(self):
    """Closes all connections to memcache servers.

    This is purely a compatibility method.  In Google App Engine, it's a no-op.
    """
    pass

  def forget_dead_hosts(self):
    """Resets all servers to the alive status.

    This is purely a compatibility method.  In Google App Engine, it's a no-op.
    """
    pass

  def debuglog(self):
    """Logging function for debugging information.

    This is purely a compatibility method.  In Google App Engine, it's a no-op.
    """
    pass

  def get_stats(self):
    """Gets memcache statistics for this application.

    All of these statistics may reset due to various transient conditions. They
    provide the best information available at the time of the call.

    Returns:
      Dictionary mapping statistic names to associated values. Statistics and
      their associated meanings:

        hits: Number of cache get requests resulting in a cache hit.
        misses: Number of cache get requests resulting in a cache miss.
        byte_hits: Sum of bytes transferred on get requests. Rolls over to
          zero on overflow.
        items: Number of key/value pairs in the cache.
        bytes: Total size of all items in the cache.
        oldest_item_age: How long in seconds since the oldest item in the
          cache was accessed. Effectively, this indicates how long a new
          item will survive in the cache without being accessed. This is
          _not_ the amount of time that has elapsed since the item was
          created.

      On error, returns None.
    """
    request = MemcacheStatsRequest()
    response = MemcacheStatsResponse()
    try:
      self._make_sync_call('memcache', 'Stats', request, response)
    except apiproxy_errors.Error:
      return None

    if not response.has_stats():
      return {
        STAT_HITS: 0,
        STAT_MISSES: 0,
        STAT_BYTE_HITS: 0,
        STAT_ITEMS: 0,
        STAT_BYTES: 0,
        STAT_OLDEST_ITEM_AGES: 0,
      }

    stats = response.stats()
    return {
      STAT_HITS: stats.hits(),
      STAT_MISSES: stats.misses(),
      STAT_BYTE_HITS: stats.byte_hits(),
      STAT_ITEMS: stats.items(),
      STAT_BYTES: stats.bytes(),
      STAT_OLDEST_ITEM_AGES: stats.oldest_item_age(),
    }
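
  # Sketch of consuming the returned mapping; hit_rate is an illustrative
  # derived value, not part of the API:
  #
  #   stats = client.get_stats()
  #   if stats is not None:
  #     total = stats[STAT_HITS] + stats[STAT_MISSES]
  #     hit_rate = float(stats[STAT_HITS]) / total if total else 0.0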

  def flush_all(self):
    """Deletes everything in memcache.

    Returns:
      True on success, False on RPC or server error.
    """
    request = MemcacheFlushRequest()
    response = MemcacheFlushResponse()
    try:
      self._make_sync_call('memcache', 'FlushAll', request, response)
    except apiproxy_errors.Error:
      return False
    return True

  def get(self, key, namespace=None):
    """Looks up a single key in memcache.

    If you have multiple items to load, though, it's much more efficient
    to use get_multi() instead, which loads them in one bulk operation,
    reducing the networking latency that'd otherwise be required to do
    many serialized get() operations.

    Args:
      key: The key in memcache to look up.  See docs on Client
        for details of format.
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      The value of the key, if found in memcache, else None.
    """
    request = MemcacheGetRequest()
    request.add_key(_key_string(key))
    namespace_manager._add_name_space(request, namespace)
    response = MemcacheGetResponse()
    try:
      self._make_sync_call('memcache', 'Get', request, response)
    except apiproxy_errors.Error:
      return None

    if not response.item_size():
      return None

    return _decode_value(response.item(0).value(),
                         response.item(0).flags(),
                         self._do_unpickle)

  def get_multi(self, keys, key_prefix='', namespace=None):
    """Looks up multiple keys from memcache in one operation.

    This is the recommended way to do bulk loads.

    Args:
      keys: List of keys to look up.  Keys may be strings or
        tuples of (hash_value, string).  Google App Engine
        does the sharding and hashing automatically, though, so the hash
        value is ignored.  To memcache, keys are just series of bytes,
        and not in any particular encoding.
      key_prefix: Prefix to prepend to all keys when talking to the server;
        not included in the returned dictionary.
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      A dictionary of the keys and values that were present in memcache.
      Even if the key_prefix was specified, that key_prefix won't be on
      the keys in the returned dictionary.
    """
    request = MemcacheGetRequest()
    namespace_manager._add_name_space(request, namespace)
    response = MemcacheGetResponse()
    user_key = {}
    for key in keys:
      request.add_key(_key_string(key, key_prefix, user_key))
    try:
      self._make_sync_call('memcache', 'Get', request, response)
    except apiproxy_errors.Error:
      return {}

    return_value = {}
    for returned_item in response.item_list():
      value = _decode_value(returned_item.value(), returned_item.flags(),
                            self._do_unpickle)
      return_value[user_key[returned_item.key()]] = value
    return return_value
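
  # Sketch of key_prefix handling (illustrative values): the prefix is applied
  # to the keys sent over the wire but stripped from the returned dictionary.
  #
  #   client.set_multi({'a': 1, 'b': 2}, key_prefix='ns1:')
  #   client.get_multi(['a', 'b'], key_prefix='ns1:')  # -> {'a': 1, 'b': 2}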

  def delete(self, key, seconds=0, namespace=None):
    """Deletes a key from memcache.

    Args:
      key: Key to delete.  See docs on Client for details.
      seconds: Optional number of seconds to make deleted items 'locked'
        for 'add' operations. Value can be a delta from current time (up to
        1 month), or an absolute Unix epoch time.  Defaults to 0, which means
        items can be immediately added.  With or without this option,
        a 'set' operation will always work.  Float values will be rounded up to
        the nearest whole second.
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      DELETE_NETWORK_FAILURE (0) on network failure,
      DELETE_ITEM_MISSING (1) if the server tried to delete the item but
      didn't have it, or
      DELETE_SUCCESSFUL (2) if the item was actually deleted.
      This can be used as a boolean value, where a network failure is the
      only bad condition.
    """
    if not isinstance(seconds, (int, long, float)):
      raise TypeError('Delete timeout must be a number.')
    if seconds < 0:
      raise ValueError('Delete timeout must be non-negative.')

    request = MemcacheDeleteRequest()
    namespace_manager._add_name_space(request, namespace)
    response = MemcacheDeleteResponse()

    delete_item = request.add_item()
    delete_item.set_key(_key_string(key))
    delete_item.set_delete_time(int(math.ceil(seconds)))
    try:
      self._make_sync_call('memcache', 'Delete', request, response)
    except apiproxy_errors.Error:
      return DELETE_NETWORK_FAILURE
    assert response.delete_status_size() == 1, 'Unexpected status size.'

    if response.delete_status(0) == MemcacheDeleteResponse.DELETED:
      return DELETE_SUCCESSFUL
    elif response.delete_status(0) == MemcacheDeleteResponse.NOT_FOUND:
      return DELETE_ITEM_MISSING
    assert False, 'Unexpected deletion status code.'
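
  # Sketch of the delete-lock semantics (illustrative):
  #
  #   client.delete('counter', seconds=10)  # lock 'add' for up to 10 seconds
  #   client.add('counter', 0)              # False while the lock is in effect
  #   client.set('counter', 0)              # True; 'set' ignores the lock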

  def delete_multi(self, keys, seconds=0, key_prefix='', namespace=None):
    """Delete multiple keys at once.

    Args:
      keys: List of keys to delete.
      seconds: Optional number of seconds to make deleted items 'locked'
        for 'add' operations. Value can be a delta from current time (up to
        1 month), or an absolute Unix epoch time.  Defaults to 0, which means
        items can be immediately added.  With or without this option,
        a 'set' operation will always work.  Float values will be rounded up to
        the nearest whole second.
      key_prefix: Prefix to put on all keys when sending specified
        keys to memcache.  See docs for get_multi() and set_multi().
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      True if all operations completed successfully.  False if one
      or more failed to complete.
    """
    if not isinstance(seconds, (int, long, float)):
      raise TypeError('Delete timeout must be a number.')
    if seconds < 0:
      raise ValueError('Delete timeout must not be negative.')

    request = MemcacheDeleteRequest()
    namespace_manager._add_name_space(request, namespace)
    response = MemcacheDeleteResponse()

    for key in keys:
      delete_item = request.add_item()
      delete_item.set_key(_key_string(key, key_prefix=key_prefix))
      delete_item.set_delete_time(int(math.ceil(seconds)))
    try:
      self._make_sync_call('memcache', 'Delete', request, response)
    except apiproxy_errors.Error:
      return False
    return True

  def set(self, key, value, time=0, min_compress_len=0, namespace=None):
    """Sets a key's value, regardless of previous contents in cache.

    Unlike add() and replace(), this method always sets (or
    overwrites) the value in memcache, regardless of previous
    contents.

    Args:
      key: Key to set.  See docs on Client for details.
      value: Value to set.  Any type.  If complex, will be pickled.
      time: Optional expiration time, either relative number of seconds
        from current time (up to 1 month), or an absolute Unix epoch time.
        By default, items never expire, though items may be evicted due to
        memory pressure.  Float values will be rounded up to the nearest
        whole second.
      min_compress_len: Ignored option for compatibility.
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      True if set.  False on error.
    """
    return self._set_with_policy(MemcacheSetRequest.SET, key, value, time=time,
                                 namespace=namespace)

  def add(self, key, value, time=0, min_compress_len=0, namespace=None):
    """Sets a key's value, iff item is not already in memcache.

    Args:
      key: Key to set.  See docs on Client for details.
      value: Value to set.  Any type.  If complex, will be pickled.
      time: Optional expiration time, either relative number of seconds
        from current time (up to 1 month), or an absolute Unix epoch time.
        By default, items never expire, though items may be evicted due to
        memory pressure.  Float values will be rounded up to the nearest
        whole second.
      min_compress_len: Ignored option for compatibility.
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      True if added.  False on error.
    """
    return self._set_with_policy(MemcacheSetRequest.ADD, key, value, time=time,
                                 namespace=namespace)

  def replace(self, key, value, time=0, min_compress_len=0, namespace=None):
    """Replaces a key's value, failing if item isn't already in memcache.

    Args:
      key: Key to set.  See docs on Client for details.
      value: Value to set.  Any type.  If complex, will be pickled.
      time: Optional expiration time, either relative number of seconds
        from current time (up to 1 month), or an absolute Unix epoch time.
        By default, items never expire, though items may be evicted due to
        memory pressure.  Float values will be rounded up to the nearest
        whole second.
      min_compress_len: Ignored option for compatibility.
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      True if replaced.  False on RPC error or cache miss.
    """
    return self._set_with_policy(MemcacheSetRequest.REPLACE,
                                 key, value, time=time, namespace=namespace)
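
  # Contrast of the three write policies (illustrative return values, assuming
  # the key 'k' is initially absent):
  #
  #   client.add('k', 'v1')      # True: 'k' was absent
  #   client.add('k', 'v2')      # False: 'k' now exists
  #   client.replace('k', 'v2')  # True: 'k' exists
  #   client.set('k', 'v3')      # True regardless of previous contents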

  def _set_with_policy(self, policy, key, value, time=0, namespace=None):
    """Sets a single key with a specified policy.

    Helper function for set(), add(), and replace().

    Args:
      policy:  One of MemcacheSetRequest.SET, .ADD, or .REPLACE.
      key: Key to add, set, or replace.  See docs on Client for details.
      value: Value to set.
      time: Expiration time, defaulting to 0 (never expiring).
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      True if stored, False on RPC error or policy error, e.g. a replace
      that failed due to the item not already existing, or an add
      failing due to the item already existing.
    """
    if not isinstance(time, (int, long, float)):
      raise TypeError('Expiration must be a number.')
    if time < 0:
      raise ValueError('Expiration must not be negative.')

    request = MemcacheSetRequest()
    item = request.add_item()
    item.set_key(_key_string(key))
    stored_value, flags = _validate_encode_value(value, self._do_pickle)
    item.set_value(stored_value)
    item.set_flags(flags)
    item.set_set_policy(policy)
    item.set_expiration_time(int(math.ceil(time)))
    namespace_manager._add_name_space(request, namespace)
    response = MemcacheSetResponse()
    try:
      self._make_sync_call('memcache', 'Set', request, response)
    except apiproxy_errors.Error:
      return False
    if response.set_status_size() != 1:
      return False
    return response.set_status(0) == MemcacheSetResponse.STORED

  def _set_multi_with_policy(self, policy, mapping, time=0, key_prefix='',
                             namespace=None):
    """Set multiple keys with a specified policy.

    Helper function for set_multi(), add_multi(), and replace_multi(). This
    reduces the network latency of doing many requests in serial.

    Args:
      policy:  One of MemcacheSetRequest.SET, ADD, or REPLACE.
      mapping: Dictionary of keys to values.
      time: Optional expiration time, either relative number of seconds
        from current time (up to 1 month), or an absolute Unix epoch time.
        By default, items never expire, though items may be evicted due to
        memory pressure.  Float values will be rounded up to the nearest
        whole second.
      key_prefix: Prefix to prepend to all keys.
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      A list of keys whose values were NOT set.  On total success,
      this list should be empty.  On network/RPC/server errors,
      a list of all input keys is returned; in this case the keys
      may or may not have been updated.
    """
    if not isinstance(time, (int, long, float)):
      raise TypeError('Expiration must be a number.')
    if time < 0.0:
      raise ValueError('Expiration must not be negative.')

    request = MemcacheSetRequest()
    user_key = {}
    server_keys = []
    for key, value in mapping.iteritems():
      server_key = _key_string(key, key_prefix, user_key)
      stored_value, flags = _validate_encode_value(value, self._do_pickle)
      server_keys.append(server_key)

      item = request.add_item()
      item.set_key(server_key)
      item.set_value(stored_value)
      item.set_flags(flags)
      item.set_set_policy(policy)
      item.set_expiration_time(int(math.ceil(time)))
    namespace_manager._add_name_space(request, namespace)

    response = MemcacheSetResponse()
    try:
      self._make_sync_call('memcache', 'Set', request, response)
    except apiproxy_errors.Error:
      return user_key.values()

    assert response.set_status_size() == len(server_keys)

    unset_list = []
    for server_key, set_status in zip(server_keys, response.set_status_list()):
      if set_status != MemcacheSetResponse.STORED:
        unset_list.append(user_key[server_key])

    return unset_list

  def set_multi(self, mapping, time=0, key_prefix='', min_compress_len=0,
                namespace=None):
    """Set multiple keys' values at once, regardless of previous contents.

    Args:
      mapping: Dictionary of keys to values.
      time: Optional expiration time, either relative number of seconds
        from current time (up to 1 month), or an absolute Unix epoch time.
        By default, items never expire, though items may be evicted due to
        memory pressure.  Float values will be rounded up to the nearest
        whole second.
      key_prefix: Prefix to prepend to all keys.
      min_compress_len: Unimplemented compatibility option.
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      A list of keys whose values were NOT set.  On total success,
      this list should be empty.
    """
    return self._set_multi_with_policy(MemcacheSetRequest.SET, mapping,
                                       time=time, key_prefix=key_prefix,
                                       namespace=namespace)
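
  # Callers typically check the list of keys that were NOT stored (sketch;
  # assumes the standard logging module has been imported):
  #
  #   failed = client.set_multi({'a': 1, 'b': 2}, time=300)
  #   if failed:
  #     logging.warning('memcache set_multi failed for keys: %s', failed)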

  def add_multi(self, mapping, time=0, key_prefix='', min_compress_len=0,
                namespace=None):
    """Set multiple keys' values iff items are not already in memcache.

    Args:
      mapping: Dictionary of keys to values.
      time: Optional expiration time, either relative number of seconds
        from current time (up to 1 month), or an absolute Unix epoch time.
        By default, items never expire, though items may be evicted due to
        memory pressure.  Float values will be rounded up to the nearest
        whole second.
      key_prefix: Prefix to prepend to all keys.
      min_compress_len: Unimplemented compatibility option.
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      A list of keys whose values were NOT set because they already existed
      in memcache.  On total success, this list should be empty.
    """
    return self._set_multi_with_policy(MemcacheSetRequest.ADD, mapping,
                                       time=time, key_prefix=key_prefix,
                                       namespace=namespace)

  def replace_multi(self, mapping, time=0, key_prefix='', min_compress_len=0,
                    namespace=None):
    """Replace multiple keys' values, failing if the items aren't in memcache.

    Args:
      mapping: Dictionary of keys to values.
      time: Optional expiration time, either relative number of seconds
        from current time (up to 1 month), or an absolute Unix epoch time.
        By default, items never expire, though items may be evicted due to
        memory pressure.  Float values will be rounded up to the nearest
        whole second.
      key_prefix: Prefix to prepend to all keys.
      min_compress_len: Unimplemented compatibility option.
      namespace: a string specifying an optional namespace to use in
        the request.

    Returns:
      A list of keys whose values were NOT set because they were not already
      present in memcache.  On total success, this list should be empty.
    """
    return self._set_multi_with_policy(MemcacheSetRequest.REPLACE, mapping,
                                       time=time, key_prefix=key_prefix,
                                       namespace=namespace)

  def incr(self, key, delta=1, namespace=None, initial_value=None):
    """Atomically increments a key's value.

    Internally, the value is an unsigned 64-bit integer.  Memcache
    doesn't check 64-bit overflows.  The value, if too large, will
    wrap around.

    Unless an initial_value is specified, the key must already exist
    in the cache to be incremented.  To initialize a counter, either
    specify initial_value or set() it to the initial value, as an
    ASCII decimal integer.  Future get()s of the key, post-increment,
    will still return an ASCII decimal value.

    Args:
      key: Key to increment.  See Client's docstring for details.
      delta: Non-negative integer value (int or long) to increment key by,
        defaulting to 1.
      namespace: a string specifying an optional namespace to use in
        the request.
      initial_value: initial value to put in the cache, if it doesn't
        already exist.  The default value, None, will not create a cache
        entry if it doesn't already exist.

    Returns:
      New long integer value, or None if key was not in the cache, could not
      be incremented for any other reason, or a network/RPC/server error
      occurred.

    Raises:
      ValueError: If delta is negative.
      TypeError: If delta isn't an int or long.
    """
    return self._incrdecr(key, False, delta, namespace=namespace,
                          initial_value=initial_value)

  def decr(self, key, delta=1, namespace=None, initial_value=None):
    """Atomically decrements a key's value.

    Internally, the value is an unsigned 64-bit integer.  Memcache
    caps decrementing below zero to zero.

    The key must already exist in the cache to be decremented.  See
    docs on incr() for details.

    Args:
      key: Key to decrement.  See Client's docstring for details.
      delta: Non-negative integer value (int or long) to decrement key by,
        defaulting to 1.
      namespace: a string specifying an optional namespace to use in
        the request.
      initial_value: initial value to put in the cache, if it doesn't
        already exist.  The default value, None, will not create a cache
        entry if it doesn't already exist.

    Returns:
      New long integer value, or None if key wasn't in cache and couldn't
      be decremented, or a network/RPC/server error occurred.

    Raises:
      ValueError: If delta is negative.
      TypeError: If delta isn't an int or long.
    """
    return self._incrdecr(key, True, delta, namespace=namespace,
                          initial_value=initial_value)
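
  # Counter idiom (sketch): pass initial_value so the first incr() does not
  # miss, then increment and decrement atomically.
  #
  #   client.incr('page_views', initial_value=0)  # -> 1 on the first call
  #   client.incr('page_views', delta=5)          # -> 6
  #   client.decr('page_views', delta=10)         # -> 0; decrementing below
  #                                               #    zero is capped at zero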

  def _incrdecr(self, key, is_negative, delta, namespace=None,
                initial_value=None):
    """Increment or decrement a key by a provided delta.

    Args:
      key: Key to increment or decrement.
      is_negative: Boolean indicating whether this is a decrement.
      delta: Non-negative integer amount (int or long) to increment
        or decrement by.
      namespace: a string specifying an optional namespace to use in
        the request.
      initial_value: initial value to put in the cache, if it doesn't
        already exist.  The default value, None, will not create a cache
        entry if it doesn't already exist.

    Returns:
      New long integer value, or None on cache miss or network/RPC/server
      error.

    Raises:
      ValueError: If delta is negative.
      TypeError: If delta isn't an int or long.
    """
    if not isinstance(delta, (int, long)):
      raise TypeError('Delta must be an integer or long, received %r' % delta)
    if delta < 0:
      raise ValueError('Delta must not be negative.')

    request = MemcacheIncrementRequest()
    namespace_manager._add_name_space(request, namespace)
    response = MemcacheIncrementResponse()
    request.set_key(_key_string(key))
    request.set_delta(delta)
    if is_negative:
      request.set_direction(MemcacheIncrementRequest.DECREMENT)
    else:
      request.set_direction(MemcacheIncrementRequest.INCREMENT)
    if initial_value is not None:
      request.set_initial_value(long(initial_value))

    try:
      self._make_sync_call('memcache', 'Increment', request, response)
    except apiproxy_errors.Error:
      return None

    if response.has_new_value():
      return response.new_value()
    return None


_CLIENT = None


def setup_client(client_obj):
  """Sets the Client object instance to use for all module-level methods.

  Use this method if you want to have custom persistent_id() or
  persistent_load() functions associated with your client.

  Args:
    client_obj: Instance of the memcache.Client object.
  """
  global _CLIENT
  var_dict = globals()

  _CLIENT = client_obj
  var_dict['set_servers'] = _CLIENT.set_servers
  var_dict['disconnect_all'] = _CLIENT.disconnect_all
  var_dict['forget_dead_hosts'] = _CLIENT.forget_dead_hosts
  var_dict['debuglog'] = _CLIENT.debuglog
  var_dict['get'] = _CLIENT.get
  var_dict['get_multi'] = _CLIENT.get_multi
  var_dict['set'] = _CLIENT.set
  var_dict['set_multi'] = _CLIENT.set_multi
  var_dict['add'] = _CLIENT.add
  var_dict['add_multi'] = _CLIENT.add_multi
  var_dict['replace'] = _CLIENT.replace
  var_dict['replace_multi'] = _CLIENT.replace_multi
  var_dict['delete'] = _CLIENT.delete
  var_dict['delete_multi'] = _CLIENT.delete_multi
  var_dict['incr'] = _CLIENT.incr
  var_dict['decr'] = _CLIENT.decr
  var_dict['flush_all'] = _CLIENT.flush_all
  var_dict['get_stats'] = _CLIENT.get_stats


setup_client(Client())
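
# Once the default client is installed above, callers typically use the
# module-level functions directly (sketch):
#
#   from google.appengine.api import memcache
#   memcache.set('k', 'v', time=30)
#   memcache.get('k')  # -> 'v'
#
# setup_client() may be called again with a customized Client (for example one
# constructed with pid/pload callables) to rebind these module-level functions.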