diff --git a/icarus-miner/Makefile b/icarus-miner/Makefile
deleted file mode 100644
index 7812fca..0000000
--- a/icarus-miner/Makefile
+++ /dev/null
@@ -1,45 +0,0 @@
-#
-# This is free software, licensed under the GNU General Public License v2.
-# See /LICENSE for more information.
-
-# those files can found under: so far those python build for ar71xx (tl-wr1043nd)
-#   pyserial_2.4-1_ar71xx.ipk
-#   python_2.6.4-3_ar71xx.ipk
-#   python-mini_2.6.4-3_ar71xx.ipk
-# https://github.com/ngzhang/Icarus/tree/master/miner_software
-
-# more info please check:
-#   http://en.qi-hardware.com/wiki/Icarus#Using_TP-link.2Ftl-wr1043nd_as_host
-
-include $(TOPDIR)/rules.mk
-
-PKG_NAME:=icarus-miner
-PKG_VERSION:=20120209
-
-include $(INCLUDE_DIR)/package.mk
-
-define Package/icarus-miner
-  PKGARCH:=all
-  MAINTAINER:="Xiangfu Liu"
-  TITLE:=icarus miner software
-  SECTION:=utils
-  CATEGORY:=Utilities
-  URL:=http://en.qi-hardware.com/wiki/Icarus
-  DEPENDS:=+librt +libpthread +libffi +zlib +kmod-usb-serial-pl2303
-endef
-
-define Build/Compile
-endef
-
-define Package/icarus-miner/install
-	$(CP) ./data/* $(1)/
-endef
-
-define Package/icarus-miner/postinst
-#!/bin/sh
-crontab /root/crontab.icarus
-/etc/init.d/cron enable
-/etc/init.d/cron restart
-endef
-
-$(eval $(call BuildPackage,icarus-miner))
diff --git a/icarus-miner/data/root/crontab.icarus b/icarus-miner/data/root/crontab.icarus
deleted file mode 100644
index cfaa54b..0000000
--- a/icarus-miner/data/root/crontab.icarus
+++ /dev/null
@@ -1 +0,0 @@
-*/10 * * * * cd /root/scripts && ./icarus_monitor.sh
diff --git a/icarus-miner/data/root/queue_ver/miner.py b/icarus-miner/data/root/queue_ver/miner.py
deleted file mode 100755
index 07f2798..0000000
--- a/icarus-miner/data/root/queue_ver/miner.py
+++ /dev/null
@@ -1,323 +0,0 @@
-#!/usr/bin/env python
-
-# by teknohog
-
-# Python wrapper for my serial port FPGA Bitcoin miners
-
-from jsonrpc import ServiceProxy
-from time import ctime, sleep, time
-from serial import Serial
-from threading import Thread, Event, Lock
-from Queue import Queue
-from optparse import OptionParser
-
-def stats(count, starttime):
-    # 2**32 hashes per share (difficulty 1)
-    mhshare = 4294.967296
-
-    s = sum(count)
-    tdelta = time() - starttime
-    rate = s * mhshare / tdelta
-
-    # This is only a rough estimate of the true hash rate,
-    # particularly when the number of events is low. However, since
-    # the events follow a Poisson distribution, we can estimate the
-    # standard deviation (sqrt(n) for n events). Thus we get some idea
-    # on how rough an estimate this is.
-
-    # s should always be positive when this function is called, but
-    # checking for robustness anyway
-    if s > 0:
-        stddev = rate / s**0.5
-    else:
-        stddev = 0
-
-    return "[%i accepted, %i failed, %.2f +/- %.2f Mhash/s]" % (count[0], count[1], rate, stddev)
-
-class Reader(Thread):
-    def __init__(self):
-        Thread.__init__(self)
-
-        self.daemon = True
-
-        # flush the input buffer
-        #ser.read(1000)
-
-    def run(self):
-        while True:
-            nonce = ser.read(4)
-
-            if len(nonce) == 4:
-                # Keep this order, because writer.block will be
-                # updated due to the golden event.
-                submitter = Submitter(writer.block, nonce)
-                submitter.start()
-                if options.debug:
-                    print("raise golden event\n")
-                golden.set()
-
-
-class Writer(Thread):
-    def __init__(self):
-        Thread.__init__(self)
-
-        # Keep something sensible available while waiting for the
-        # first getwork
-        #self.block = "0" * 256
-        #self.midstate = "0" * 64
-
-        # This will produce nonce 063c5e01 -> debug by using a bogus URL
-        self.block = "0000000120c8222d0497a7ab44a1a2c7bf39de941c9970b1dc7cdc400000079700000000e88aabe1f353238c668d8a4df9318e614c10c474f8cdf8bc5f6397b946c33d7c4e7242c31a098ea500000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000"
-        self.midstate = "33c5bf5751ec7f7e056443b5aee3800331432c83f404d9de38b94ecbf907b92d"
-
-        self.daemon = True
-
-    def run(self):
-        while True:
-            result =0
-            #try:
-            #    work = bitcoin.getwork()
-            #    self.block = work['data']
-            #    self.midstate = work['midstate']
-            #except:
-            #    print("RPC getwork error")
-            # In this case, keep crunching with the old data. It will get
-            # stale at some point, but it's better than doing nothing.
-
-            # Just a reminder of how Python slices work in reverse
-            #rdata = self.block.decode('hex')[::-1]
-            #rdata2 = rdata[32:64]
-            work = wq.read_work_queue()
-            self.block = work['data']
-            self.midstate = work['midstate']
-            #print("push work to miner")
-            rdata2 = self.block.decode('hex')[95:63:-1]
-
-            rmid = self.midstate.decode('hex')[::-1]
-
-            payload = rmid + rdata2
-
-            ser.write(payload)
-            result = golden.wait(options.askrate)
-
-            if result:
-                golden.clear()
-                if options.debug:
-                    print("clear golden event")
-
-class WorkQueue:
-    def __init__(self, max_num):
-        self.max_num = max_num+1
-        self.ptr = 0
-        self.ptr_tobe = 0;
-        self.tail = 0
-        self.in_wr = 0;
-        self.work = {}
-        self.work_queue = []
-        for i in range(self.max_num):
-            self.work_queue.append({})
-    def get_from_server(self):
-        get_success = 0
-        while get_success != 1 :
-            try:
-                self.work_queue[self.ptr_tobe] = bitcoin.getwork()
-                get_success = 1
-            except:
-                print("RPC getwork error")
-    def is_full(self):
-        ptr_mutex.acquire()
-        full = (self.ptr + 1) % self.max_num == self.tail
-        ptr_mutex.release()
-        return full
-    def write_work_queue(self):
-        #print("update work queue")
-
-        ptr_mutex.acquire()
-        if (self.ptr + 1) % self.max_num == self.tail:
-            if options.debug:
-                print("Queue is full")
-            self.tail = (self.tail + 1) % self.max_num
-        self.ptr_tobe = (self.ptr + 1) % self.max_num
-        #print("write0:tail=", self.tail, "ptr_tobe=", self.ptr_tobe, "ptr="+self.ptr)
-        if options.debug:
-            print "write0:tail=%d, ptr_tobe=%d, ptr=%d" % (self.tail, self.ptr_tobe, self.ptr)
-        ptr_mutex.release()
-        #self.work_queue[self.ptr] = bitcoin.getwork()
-
-        write_queue_mutex.acquire()
-        self.get_from_server()
-        write_queue_mutex.release()
-
-        ptr_mutex.acquire()
-        if (self.ptr + 1) % self.max_num != self.ptr_tobe:
-            self.work_queue[self.ptr] = self.work_queue[self.ptr_tobe]
-        else:
-            self.ptr = self.ptr_tobe
-        if options.debug:
-            print "write1:tail=%d, ptr_tobe=%d, ptr=%d" % (self.tail, self.ptr_tobe, self.ptr)
-        #print("write1:tail="+self.tail+"ptr_tobe="+self.ptr_tobe+"ptr="+self.ptr)
-        ptr_mutex.release()
-
-        #print("1update work queue")
-
-    def read_work_queue(self):
-        ptr_mutex.acquire()
-        #print("read from queue")
-        if options.debug:
-            print"read0:tail=%d, ptr=%d" % (self.tail, self.ptr)
-        if self.ptr == self.tail:
-            ptr_mutex.release()
-            #print("Queue is empty")
-            #print("1read from queue")
-            write_queue_mutex.acquire()
-            ptr_mutex.acquire()
-            if self.ptr == self.tail:
-                print("reader get queue")
-                self.ptr_tobe = (self.ptr + 1) % self.max_num
-                self.get_from_server()
-                self.ptr = self.ptr_tobe
-            write_queue_mutex.release()
-
-        self.work = self.work_queue[self.ptr]
-
-        if self.ptr == 0:
-            self.ptr = self.max_num - 1
-        else:
-            self.ptr = self.ptr - 1
-        #print("read0:tail="+self.tail+"ptr="+self.ptr)
-        if options.debug:
-            print"read1:tail=%d, ptr=%d" % (self.tail, self.ptr)
-
-        ptr_mutex.release()
-
-        return self.work
-
-
-class GetWorkQueue(Thread):
-    def __init__(self):
-        Thread.__init__(self)
-        self.daemon = True
-        self.delay = (options.askrate>>1)+1
-    def run(self):
-        while True:
-            if options.debug:
-                print("GetWorkQueue thread")
-            wq.write_work_queue()
-            #if (self.ptr + 1) % self.max_num == self.tail:
-            if(wq.is_full()):
-                sleep(4)
-                if options.debug:
-                    print("queue is full, slow down request")
-            else:
-                if options.debug:
-                    print("****\nfill the work queue at speed\n****")
-            #else:
-            #    sleep(2)
-
-
-class Submitter(Thread):
-    def __init__(self, block, nonce):
-        Thread.__init__(self)
-
-        self.block = block
-        self.nonce = nonce
-
-    def run(self):
-        # This thread will be created upon every submit, as they may
-        # come in sooner than the submits finish.
-
-        print("Block found on " + ctime() + "\n")
-
-        if stride > 0:
-            n = self.nonce.encode('hex')
-            print(n + " % " + str(stride) + " = " + str(int(n, 16) % stride))
-        elif options.debug:
-            print(self.nonce.encode('hex'))
-
-        hrnonce = self.nonce[::-1].encode('hex')
-
-        data = self.block[:152] + hrnonce + self.block[160:]
-
-        try:
-            result = bitcoin.getwork(data)
-            print("Upstream result: " + str(result))
-            print(self.nonce.encode('hex'))
-        except:
-            print("RPC send error")
-            print(self.nonce.encode('hex'))
-            # a sensible boolean for stats
-            result = False
-
-        results_queue.put(result)
-
-class Display_stats(Thread):
-    def __init__(self):
-        Thread.__init__(self)
-
-        self.count = [0, 0]
-        self.starttime = time()
-        self.daemon = True
-
-        print("Miner started on " + ctime())
-
-    def run(self):
-        while True:
-            result = results_queue.get()
-
-            if result:
-                self.count[0] += 1
-            else:
-                self.count[1] += 1
-
-            print(stats(self.count, self.starttime))
-
-            results_queue.task_done()
-
-parser = OptionParser()
-
-parser.add_option("-a", "--askrate", dest="askrate", default=8, help="Seconds between getwork requests")
-
-parser.add_option("-d", "--debug", dest="debug", default=False, action="store_true", help="Show each nonce result in hex")
-
-parser.add_option("-m", "--miners", dest="miners", default=0, help="Show the nonce result remainder mod MINERS, to identify the node in a cluster")
-
-parser.add_option("-u", "--url", dest="url", default="http://test_fpga_btc@hotmail.com:lzhjxntswc@pit.deepbit.net:8332/", help="URL for bitcoind or mining pool, typically http://user:password@host:8332/")
-
-parser.add_option("-s", "--serial", dest="serial_port", default="com3", help="Serial port, e.g. /dev/ttyS0 on unix or COM1 in Windows")
-
-(options, args) = parser.parse_args()
-
-stride = int(options.miners)
-
-golden = Event()
-
-ptr_mutex = Lock();
-write_queue_mutex = Lock();
-
-bitcoin = ServiceProxy(options.url)
-
-results_queue = Queue()
-
-ser = Serial(options.serial_port, 115200, timeout=options.askrate)
-
-wq = WorkQueue(5)
-
-reader = Reader()
-writer = Writer()
-get_work_queue = GetWorkQueue()
-disp = Display_stats()
-
-get_work_queue.start()
-reader.start()
-writer.start()
-disp.start()
-
-try:
-    while True:
-        # Threads are generally hard to interrupt. So they are left
-        # running as daemons, and we do something simple here that can
-        # be easily terminated to bring down the entire script.
-        sleep(10000)
-except KeyboardInterrupt:
-    print("Terminated")
-
diff --git a/icarus-miner/data/root/scripts/icarus_monitor.sh b/icarus-miner/data/root/scripts/icarus_monitor.sh
deleted file mode 100755
index 8183f1a..0000000
--- a/icarus-miner/data/root/scripts/icarus_monitor.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/sh
-
-API_KEY=http://deepbit.net/api/4edf2d91069172fdae000000_DE38384EE2
-WORKER=http://xiangfu.z@gmail.com_1:1234@pit.deepbit.net:8332/
-
-################################################
-
-SCRIPT_PATH=`pwd`
-
-${SCRIPT_PATH}/icarus_undermanager.py -a ${API_KEY} > ${SCRIPT_PATH}/u.log 2>&1
-date >> ${SCRIPT_PATH}/u.log
-
-TRUE_COUNT=`less ${SCRIPT_PATH}/u.log | grep "\"alive\": true" | wc -l`
-HASHRATE=`less ${SCRIPT_PATH}/u.log | grep "\"hashrate\": 0," | wc -l`
-
-if [ "${TRUE_COUNT}" == "0" ] || [ "${HASHRATE}" == "1" ]; then
-    echo `date` >> ${SCRIPT_PATH}/restart.log
-
-    ps ax | grep "python.*miner.py" | grep -v grep | sed 's/^ *//' | cut -d ' ' -f 1 | xargs kill -15
-
-    ICARUS_MINING_PATH="../queue_ver"
-    (cd ${ICARUS_MINING_PATH} && ./miner.py -u "${WORKER}" -s /dev/ttyUSB0 > /dev/null 2>&1 &)
-fi
diff --git a/icarus-miner/data/root/scripts/icarus_undermanager.py b/icarus-miner/data/root/scripts/icarus_undermanager.py
deleted file mode 100755
index 49dbbd8..0000000
--- a/icarus-miner/data/root/scripts/icarus_undermanager.py
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/env python
-
-import json, urllib
-from optparse import OptionParser
-
-class Deepbit(object):
-    @staticmethod
-    def get_stats(url):
-        try:
-            result = json.load(urllib.urlopen(url))
-        except:
-            # An error occurred; raise an exception
-            raise NameError('Could not get the data, sorry. Maybe a non-functional internet connection or wrong API key?')
-        return result
-
-try:
-    parser = OptionParser()
-    parser.add_option("-a",
-                      "--api-key",
-                      dest="api",
-                      default="http://deepbit.net/api/4edf2d91069172fdae000000_DE38384EE2",
-                      help="JSON API key")
-
-    (options, args) = parser.parse_args()
-
-    print json.dumps(Deepbit.get_stats(options.api), indent=2)
-except Exception as e:
-    print e
diff --git a/icarus-miner/data/root/scripts/restart.sh b/icarus-miner/data/root/scripts/restart.sh
deleted file mode 100755
index ac72e05..0000000
--- a/icarus-miner/data/root/scripts/restart.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/sh
-
-WORKER=http://xiangfu.z@gmail.com_1:1234@pit.deepbit.net:8332/
-
-ps ax | grep "python.*miner.py" | grep -v grep | sed 's/^ *//' | cut -d ' ' -f 1 | xargs kill -15
-
-ICARUS_MINING_PATH="../queue_ver"
-(cd ${ICARUS_MINING_PATH} && ./miner.py -u "${WORKER}" -s /dev/ttyUSB0 > /dev/null 2>&1 &)
diff --git a/icarus-miner/data/root/scripts/u.log b/icarus-miner/data/root/scripts/u.log
deleted file mode 100644
index 7a33c8b..0000000
--- a/icarus-miner/data/root/scripts/u.log
+++ /dev/null
@@ -1 +0,0 @@
-icarus_monitor.sh not start yet
diff --git a/icarus-miner/data/usr/bin/python b/icarus-miner/data/usr/bin/python
deleted file mode 120000
index 7453c36..0000000
--- a/icarus-miner/data/usr/bin/python
+++ /dev/null
@@ -1 +0,0 @@
-python2.6
\ No newline at end of file
diff --git a/icarus-miner/data/usr/bin/python2.6 b/icarus-miner/data/usr/bin/python2.6
deleted file mode 100755
index fea28ce..0000000
Binary files a/icarus-miner/data/usr/bin/python2.6 and /dev/null differ
diff --git a/icarus-miner/data/usr/lib/python2.6/Queue.py b/icarus-miner/data/usr/lib/python2.6/Queue.py
deleted file mode 100644
index 773b680..0000000
--- a/icarus-miner/data/usr/lib/python2.6/Queue.py
+++ /dev/null
@@ -1,244 +0,0 @@
-"""A multi-producer, multi-consumer queue."""
-
-from time import time as _time
-from collections import deque
-import heapq
-
-__all__ = ['Empty', 'Full', 'Queue', 'PriorityQueue', 'LifoQueue']
-
-class Empty(Exception):
-    "Exception raised by Queue.get(block=0)/get_nowait()."
-    pass
-
-class Full(Exception):
-    "Exception raised by Queue.put(block=0)/put_nowait()."
-    pass
-
-class Queue:
-    """Create a queue object with a given maximum size.
-
-    If maxsize is <= 0, the queue size is infinite.
-    """
-    def __init__(self, maxsize=0):
-        try:
-            import threading
-        except ImportError:
-            import dummy_threading as threading
-        self.maxsize = maxsize
-        self._init(maxsize)
-        # mutex must be held whenever the queue is mutating. All methods
-        # that acquire mutex must release it before returning. mutex
-        # is shared between the three conditions, so acquiring and
-        # releasing the conditions also acquires and releases mutex.
-        self.mutex = threading.Lock()
-        # Notify not_empty whenever an item is added to the queue; a
-        # thread waiting to get is notified then.
-        self.not_empty = threading.Condition(self.mutex)
-        # Notify not_full whenever an item is removed from the queue;
-        # a thread waiting to put is notified then.
-        self.not_full = threading.Condition(self.mutex)
-        # Notify all_tasks_done whenever the number of unfinished tasks
-        # drops to zero; thread waiting to join() is notified to resume
-        self.all_tasks_done = threading.Condition(self.mutex)
-        self.unfinished_tasks = 0
-
-    def task_done(self):
-        """Indicate that a formerly enqueued task is complete.
-
-        Used by Queue consumer threads.
For each get() used to fetch a task, - a subsequent call to task_done() tells the queue that the processing - on the task is complete. - - If a join() is currently blocking, it will resume when all items - have been processed (meaning that a task_done() call was received - for every item that had been put() into the queue). - - Raises a ValueError if called more times than there were items - placed in the queue. - """ - self.all_tasks_done.acquire() - try: - unfinished = self.unfinished_tasks - 1 - if unfinished <= 0: - if unfinished < 0: - raise ValueError('task_done() called too many times') - self.all_tasks_done.notify_all() - self.unfinished_tasks = unfinished - finally: - self.all_tasks_done.release() - - def join(self): - """Blocks until all items in the Queue have been gotten and processed. - - The count of unfinished tasks goes up whenever an item is added to the - queue. The count goes down whenever a consumer thread calls task_done() - to indicate the item was retrieved and all work on it is complete. - - When the count of unfinished tasks drops to zero, join() unblocks. - """ - self.all_tasks_done.acquire() - try: - while self.unfinished_tasks: - self.all_tasks_done.wait() - finally: - self.all_tasks_done.release() - - def qsize(self): - """Return the approximate size of the queue (not reliable!).""" - self.mutex.acquire() - n = self._qsize() - self.mutex.release() - return n - - def empty(self): - """Return True if the queue is empty, False otherwise (not reliable!).""" - self.mutex.acquire() - n = not self._qsize() - self.mutex.release() - return n - - def full(self): - """Return True if the queue is full, False otherwise (not reliable!).""" - self.mutex.acquire() - n = 0 < self.maxsize == self._qsize() - self.mutex.release() - return n - - def put(self, item, block=True, timeout=None): - """Put an item into the queue. - - If optional args 'block' is true and 'timeout' is None (the default), - block if necessary until a free slot is available. If 'timeout' is - a positive number, it blocks at most 'timeout' seconds and raises - the Full exception if no free slot was available within that time. - Otherwise ('block' is false), put an item on the queue if a free slot - is immediately available, else raise the Full exception ('timeout' - is ignored in that case). - """ - self.not_full.acquire() - try: - if self.maxsize > 0: - if not block: - if self._qsize() == self.maxsize: - raise Full - elif timeout is None: - while self._qsize() == self.maxsize: - self.not_full.wait() - elif timeout < 0: - raise ValueError("'timeout' must be a positive number") - else: - endtime = _time() + timeout - while self._qsize() == self.maxsize: - remaining = endtime - _time() - if remaining <= 0.0: - raise Full - self.not_full.wait(remaining) - self._put(item) - self.unfinished_tasks += 1 - self.not_empty.notify() - finally: - self.not_full.release() - - def put_nowait(self, item): - """Put an item into the queue without blocking. - - Only enqueue the item if a free slot is immediately available. - Otherwise raise the Full exception. - """ - return self.put(item, False) - - def get(self, block=True, timeout=None): - """Remove and return an item from the queue. - - If optional args 'block' is true and 'timeout' is None (the default), - block if necessary until an item is available. If 'timeout' is - a positive number, it blocks at most 'timeout' seconds and raises - the Empty exception if no item was available within that time. 
- Otherwise ('block' is false), return an item if one is immediately - available, else raise the Empty exception ('timeout' is ignored - in that case). - """ - self.not_empty.acquire() - try: - if not block: - if not self._qsize(): - raise Empty - elif timeout is None: - while not self._qsize(): - self.not_empty.wait() - elif timeout < 0: - raise ValueError("'timeout' must be a positive number") - else: - endtime = _time() + timeout - while not self._qsize(): - remaining = endtime - _time() - if remaining <= 0.0: - raise Empty - self.not_empty.wait(remaining) - item = self._get() - self.not_full.notify() - return item - finally: - self.not_empty.release() - - def get_nowait(self): - """Remove and return an item from the queue without blocking. - - Only get an item if one is immediately available. Otherwise - raise the Empty exception. - """ - return self.get(False) - - # Override these methods to implement other queue organizations - # (e.g. stack or priority queue). - # These will only be called with appropriate locks held - - # Initialize the queue representation - def _init(self, maxsize): - self.queue = deque() - - def _qsize(self, len=len): - return len(self.queue) - - # Put a new item in the queue - def _put(self, item): - self.queue.append(item) - - # Get an item from the queue - def _get(self): - return self.queue.popleft() - - -class PriorityQueue(Queue): - '''Variant of Queue that retrieves open entries in priority order (lowest first). - - Entries are typically tuples of the form: (priority number, data). - ''' - - def _init(self, maxsize): - self.queue = [] - - def _qsize(self, len=len): - return len(self.queue) - - def _put(self, item, heappush=heapq.heappush): - heappush(self.queue, item) - - def _get(self, heappop=heapq.heappop): - return heappop(self.queue) - - -class LifoQueue(Queue): - '''Variant of Queue that retrieves most recently added entries first.''' - - def _init(self, maxsize): - self.queue = [] - - def _qsize(self, len=len): - return len(self.queue) - - def _put(self, item): - self.queue.append(item) - - def _get(self): - return self.queue.pop() diff --git a/icarus-miner/data/usr/lib/python2.6/Queue.pyc b/icarus-miner/data/usr/lib/python2.6/Queue.pyc deleted file mode 100644 index 43bd56f..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/Queue.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/StringIO.py b/icarus-miner/data/usr/lib/python2.6/StringIO.py deleted file mode 100644 index 232009f..0000000 --- a/icarus-miner/data/usr/lib/python2.6/StringIO.py +++ /dev/null @@ -1,323 +0,0 @@ -r"""File-like objects that read from or write to a string buffer. - -This implements (nearly) all stdio methods. - -f = StringIO() # ready for writing -f = StringIO(buf) # ready for reading -f.close() # explicitly release resources held -flag = f.isatty() # always false -pos = f.tell() # get current position -f.seek(pos) # set current position -f.seek(pos, mode) # mode 0: absolute; 1: relative; 2: relative to EOF -buf = f.read() # read until EOF -buf = f.read(n) # read up to n bytes -buf = f.readline() # read until end of line ('\n') or EOF -list = f.readlines()# list of f.readline() results until EOF -f.truncate([size]) # truncate file at to at most size (default: current pos) -f.write(buf) # write at current position -f.writelines(list) # for line in list: f.write(line) -f.getvalue() # return whole file's contents as a string - -Notes: -- Using a real file is often faster (but less convenient). 
-- There's also a much faster implementation in C, called cStringIO, but - it's not subclassable. -- fileno() is left unimplemented so that code which uses it triggers - an exception early. -- Seeking far beyond EOF and then writing will insert real null - bytes that occupy space in the buffer. -- There's a simple test set (see end of this file). -""" -try: - from errno import EINVAL -except ImportError: - EINVAL = 22 - -__all__ = ["StringIO"] - -def _complain_ifclosed(closed): - if closed: - raise ValueError, "I/O operation on closed file" - -class StringIO: - """class StringIO([buffer]) - - When a StringIO object is created, it can be initialized to an existing - string by passing the string to the constructor. If no string is given, - the StringIO will start empty. - - The StringIO object can accept either Unicode or 8-bit strings, but - mixing the two may take some care. If both are used, 8-bit strings that - cannot be interpreted as 7-bit ASCII (that use the 8th bit) will cause - a UnicodeError to be raised when getvalue() is called. - """ - def __init__(self, buf = ''): - # Force self.buf to be a string or unicode - if not isinstance(buf, basestring): - buf = str(buf) - self.buf = buf - self.len = len(buf) - self.buflist = [] - self.pos = 0 - self.closed = False - self.softspace = 0 - - def __iter__(self): - return self - - def next(self): - """A file object is its own iterator, for example iter(f) returns f - (unless f is closed). When a file is used as an iterator, typically - in a for loop (for example, for line in f: print line), the next() - method is called repeatedly. This method returns the next input line, - or raises StopIteration when EOF is hit. - """ - _complain_ifclosed(self.closed) - r = self.readline() - if not r: - raise StopIteration - return r - - def close(self): - """Free the memory buffer. - """ - if not self.closed: - self.closed = True - del self.buf, self.pos - - def isatty(self): - """Returns False because StringIO objects are not connected to a - tty-like device. - """ - _complain_ifclosed(self.closed) - return False - - def seek(self, pos, mode = 0): - """Set the file's current position. - - The mode argument is optional and defaults to 0 (absolute file - positioning); other values are 1 (seek relative to the current - position) and 2 (seek relative to the file's end). - - There is no return value. - """ - _complain_ifclosed(self.closed) - if self.buflist: - self.buf += ''.join(self.buflist) - self.buflist = [] - if mode == 1: - pos += self.pos - elif mode == 2: - pos += self.len - self.pos = max(0, pos) - - def tell(self): - """Return the file's current position.""" - _complain_ifclosed(self.closed) - return self.pos - - def read(self, n = -1): - """Read at most size bytes from the file - (less if the read hits EOF before obtaining size bytes). - - If the size argument is negative or omitted, read all data until EOF - is reached. The bytes are returned as a string object. An empty - string is returned when EOF is encountered immediately. - """ - _complain_ifclosed(self.closed) - if self.buflist: - self.buf += ''.join(self.buflist) - self.buflist = [] - if n < 0: - newpos = self.len - else: - newpos = min(self.pos+n, self.len) - r = self.buf[self.pos:newpos] - self.pos = newpos - return r - - def readline(self, length=None): - r"""Read one entire line from the file. - - A trailing newline character is kept in the string (but may be absent - when a file ends with an incomplete line). 
If the size argument is - present and non-negative, it is a maximum byte count (including the - trailing newline) and an incomplete line may be returned. - - An empty string is returned only when EOF is encountered immediately. - - Note: Unlike stdio's fgets(), the returned string contains null - characters ('\0') if they occurred in the input. - """ - _complain_ifclosed(self.closed) - if self.buflist: - self.buf += ''.join(self.buflist) - self.buflist = [] - i = self.buf.find('\n', self.pos) - if i < 0: - newpos = self.len - else: - newpos = i+1 - if length is not None: - if self.pos + length < newpos: - newpos = self.pos + length - r = self.buf[self.pos:newpos] - self.pos = newpos - return r - - def readlines(self, sizehint = 0): - """Read until EOF using readline() and return a list containing the - lines thus read. - - If the optional sizehint argument is present, instead of reading up - to EOF, whole lines totalling approximately sizehint bytes (or more - to accommodate a final whole line). - """ - total = 0 - lines = [] - line = self.readline() - while line: - lines.append(line) - total += len(line) - if 0 < sizehint <= total: - break - line = self.readline() - return lines - - def truncate(self, size=None): - """Truncate the file's size. - - If the optional size argument is present, the file is truncated to - (at most) that size. The size defaults to the current position. - The current file position is not changed unless the position - is beyond the new file size. - - If the specified size exceeds the file's current size, the - file remains unchanged. - """ - _complain_ifclosed(self.closed) - if size is None: - size = self.pos - elif size < 0: - raise IOError(EINVAL, "Negative size not allowed") - elif size < self.pos: - self.pos = size - self.buf = self.getvalue()[:size] - self.len = size - - def write(self, s): - """Write a string to the file. - - There is no return value. - """ - _complain_ifclosed(self.closed) - if not s: return - # Force s to be a string or unicode - if not isinstance(s, basestring): - s = str(s) - spos = self.pos - slen = self.len - if spos == slen: - self.buflist.append(s) - self.len = self.pos = spos + len(s) - return - if spos > slen: - self.buflist.append('\0'*(spos - slen)) - slen = spos - newpos = spos + len(s) - if spos < slen: - if self.buflist: - self.buf += ''.join(self.buflist) - self.buflist = [self.buf[:spos], s, self.buf[newpos:]] - self.buf = '' - if newpos > slen: - slen = newpos - else: - self.buflist.append(s) - slen = newpos - self.len = slen - self.pos = newpos - - def writelines(self, iterable): - """Write a sequence of strings to the file. The sequence can be any - iterable object producing strings, typically a list of strings. There - is no return value. - - (The name is intended to match readlines(); writelines() does not add - line separators.) - """ - write = self.write - for line in iterable: - write(line) - - def flush(self): - """Flush the internal buffer - """ - _complain_ifclosed(self.closed) - - def getvalue(self): - """ - Retrieve the entire contents of the "file" at any time before - the StringIO object's close() method is called. - - The StringIO object can accept either Unicode or 8-bit strings, - but mixing the two may take some care. If both are used, 8-bit - strings that cannot be interpreted as 7-bit ASCII (that use the - 8th bit) will cause a UnicodeError to be raised when getvalue() - is called. 
- """ - if self.buflist: - self.buf += ''.join(self.buflist) - self.buflist = [] - return self.buf - - -# A little test suite - -def test(): - import sys - if sys.argv[1:]: - file = sys.argv[1] - else: - file = '/etc/passwd' - lines = open(file, 'r').readlines() - text = open(file, 'r').read() - f = StringIO() - for line in lines[:-2]: - f.write(line) - f.writelines(lines[-2:]) - if f.getvalue() != text: - raise RuntimeError, 'write failed' - length = f.tell() - print 'File length =', length - f.seek(len(lines[0])) - f.write(lines[1]) - f.seek(0) - print 'First line =', repr(f.readline()) - print 'Position =', f.tell() - line = f.readline() - print 'Second line =', repr(line) - f.seek(-len(line), 1) - line2 = f.read(len(line)) - if line != line2: - raise RuntimeError, 'bad result after seek back' - f.seek(len(line2), 1) - list = f.readlines() - line = list[-1] - f.seek(f.tell() - len(line)) - line2 = f.read() - if line != line2: - raise RuntimeError, 'bad result after seek back from EOF' - print 'Read', len(list), 'more lines' - print 'File length =', f.tell() - if f.tell() != length: - raise RuntimeError, 'bad length' - f.truncate(length/2) - f.seek(0, 2) - print 'Truncated length =', f.tell() - if f.tell() != length/2: - raise RuntimeError, 'truncate did not adjust length' - f.close() - -if __name__ == '__main__': - test() diff --git a/icarus-miner/data/usr/lib/python2.6/StringIO.pyc b/icarus-miner/data/usr/lib/python2.6/StringIO.pyc deleted file mode 100644 index c053412..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/StringIO.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/UserDict.py b/icarus-miner/data/usr/lib/python2.6/UserDict.py deleted file mode 100644 index 0d9591a..0000000 --- a/icarus-miner/data/usr/lib/python2.6/UserDict.py +++ /dev/null @@ -1,179 +0,0 @@ -"""A more or less complete user-defined wrapper around dictionary objects.""" - -class UserDict: - def __init__(self, dict=None, **kwargs): - self.data = {} - if dict is not None: - self.update(dict) - if len(kwargs): - self.update(kwargs) - def __repr__(self): return repr(self.data) - def __cmp__(self, dict): - if isinstance(dict, UserDict): - return cmp(self.data, dict.data) - else: - return cmp(self.data, dict) - def __len__(self): return len(self.data) - def __getitem__(self, key): - if key in self.data: - return self.data[key] - if hasattr(self.__class__, "__missing__"): - return self.__class__.__missing__(self, key) - raise KeyError(key) - def __setitem__(self, key, item): self.data[key] = item - def __delitem__(self, key): del self.data[key] - def clear(self): self.data.clear() - def copy(self): - if self.__class__ is UserDict: - return UserDict(self.data.copy()) - import copy - data = self.data - try: - self.data = {} - c = copy.copy(self) - finally: - self.data = data - c.update(self) - return c - def keys(self): return self.data.keys() - def items(self): return self.data.items() - def iteritems(self): return self.data.iteritems() - def iterkeys(self): return self.data.iterkeys() - def itervalues(self): return self.data.itervalues() - def values(self): return self.data.values() - def has_key(self, key): return key in self.data - def update(self, dict=None, **kwargs): - if dict is None: - pass - elif isinstance(dict, UserDict): - self.data.update(dict.data) - elif isinstance(dict, type({})) or not hasattr(dict, 'items'): - self.data.update(dict) - else: - for k, v in dict.items(): - self[k] = v - if len(kwargs): - self.data.update(kwargs) - def get(self, key, failobj=None): - if 
key not in self: - return failobj - return self[key] - def setdefault(self, key, failobj=None): - if key not in self: - self[key] = failobj - return self[key] - def pop(self, key, *args): - return self.data.pop(key, *args) - def popitem(self): - return self.data.popitem() - def __contains__(self, key): - return key in self.data - @classmethod - def fromkeys(cls, iterable, value=None): - d = cls() - for key in iterable: - d[key] = value - return d - -class IterableUserDict(UserDict): - def __iter__(self): - return iter(self.data) - -import _abcoll -_abcoll.MutableMapping.register(IterableUserDict) - - -class DictMixin: - # Mixin defining all dictionary methods for classes that already have - # a minimum dictionary interface including getitem, setitem, delitem, - # and keys. Without knowledge of the subclass constructor, the mixin - # does not define __init__() or copy(). In addition to the four base - # methods, progressively more efficiency comes with defining - # __contains__(), __iter__(), and iteritems(). - - # second level definitions support higher levels - def __iter__(self): - for k in self.keys(): - yield k - def has_key(self, key): - try: - value = self[key] - except KeyError: - return False - return True - def __contains__(self, key): - return self.has_key(key) - - # third level takes advantage of second level definitions - def iteritems(self): - for k in self: - yield (k, self[k]) - def iterkeys(self): - return self.__iter__() - - # fourth level uses definitions from lower levels - def itervalues(self): - for _, v in self.iteritems(): - yield v - def values(self): - return [v for _, v in self.iteritems()] - def items(self): - return list(self.iteritems()) - def clear(self): - for key in self.keys(): - del self[key] - def setdefault(self, key, default=None): - try: - return self[key] - except KeyError: - self[key] = default - return default - def pop(self, key, *args): - if len(args) > 1: - raise TypeError, "pop expected at most 2 arguments, got "\ - + repr(1 + len(args)) - try: - value = self[key] - except KeyError: - if args: - return args[0] - raise - del self[key] - return value - def popitem(self): - try: - k, v = self.iteritems().next() - except StopIteration: - raise KeyError, 'container is empty' - del self[k] - return (k, v) - def update(self, other=None, **kwargs): - # Make progressively weaker assumptions about "other" - if other is None: - pass - elif hasattr(other, 'iteritems'): # iteritems saves memory and lookups - for k, v in other.iteritems(): - self[k] = v - elif hasattr(other, 'keys'): - for k in other.keys(): - self[k] = other[k] - else: - for k, v in other: - self[k] = v - if kwargs: - self.update(kwargs) - def get(self, key, default=None): - try: - return self[key] - except KeyError: - return default - def __repr__(self): - return repr(dict(self.iteritems())) - def __cmp__(self, other): - if other is None: - return 1 - if isinstance(other, DictMixin): - other = dict(other.iteritems()) - return cmp(dict(self.iteritems()), other) - def __len__(self): - return len(self.keys()) diff --git a/icarus-miner/data/usr/lib/python2.6/UserDict.pyc b/icarus-miner/data/usr/lib/python2.6/UserDict.pyc deleted file mode 100644 index c3bbaea..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/UserDict.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/__init__.py b/icarus-miner/data/usr/lib/python2.6/__init__.py deleted file mode 100644 index aea7c5e..0000000 --- a/icarus-miner/data/usr/lib/python2.6/__init__.py +++ /dev/null @@ -1,157 +0,0 @@ 
-""" Standard "encodings" Package - - Standard Python encoding modules are stored in this package - directory. - - Codec modules must have names corresponding to normalized encoding - names as defined in the normalize_encoding() function below, e.g. - 'utf-8' must be implemented by the module 'utf_8.py'. - - Each codec module must export the following interface: - - * getregentry() -> codecs.CodecInfo object - The getregentry() API must a CodecInfo object with encoder, decoder, - incrementalencoder, incrementaldecoder, streamwriter and streamreader - atttributes which adhere to the Python Codec Interface Standard. - - In addition, a module may optionally also define the following - APIs which are then used by the package's codec search function: - - * getaliases() -> sequence of encoding name strings to use as aliases - - Alias names returned by getaliases() must be normalized encoding - names as defined by normalize_encoding(). - -Written by Marc-Andre Lemburg (mal@lemburg.com). - -(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. - -"""#" - -import codecs -from encodings import aliases -import __builtin__ - -_cache = {} -_unknown = '--unknown--' -_import_tail = ['*'] -_norm_encoding_map = (' . ' - '0123456789 ABCDEFGHIJKLMNOPQRSTUVWXYZ ' - ' abcdefghijklmnopqrstuvwxyz ' - ' ' - ' ' - ' ') -_aliases = aliases.aliases - -class CodecRegistryError(LookupError, SystemError): - pass - -def normalize_encoding(encoding): - - """ Normalize an encoding name. - - Normalization works as follows: all non-alphanumeric - characters except the dot used for Python package names are - collapsed and replaced with a single underscore, e.g. ' -;#' - becomes '_'. Leading and trailing underscores are removed. - - Note that encoding names should be ASCII only; if they do use - non-ASCII characters, these must be Latin-1 compatible. - - """ - # Make sure we have an 8-bit string, because .translate() works - # differently for Unicode strings. - if hasattr(__builtin__, "unicode") and isinstance(encoding, unicode): - # Note that .encode('latin-1') does *not* use the codec - # registry, so this call doesn't recurse. (See unicodeobject.c - # PyUnicode_AsEncodedString() for details) - encoding = encoding.encode('latin-1') - return '_'.join(encoding.translate(_norm_encoding_map).split()) - -def search_function(encoding): - - # Cache lookup - entry = _cache.get(encoding, _unknown) - if entry is not _unknown: - return entry - - # Import the module: - # - # First try to find an alias for the normalized encoding - # name and lookup the module using the aliased name, then try to - # lookup the module using the standard import scheme, i.e. first - # try in the encodings package, then at top-level. - # - norm_encoding = normalize_encoding(encoding) - aliased_encoding = _aliases.get(norm_encoding) or \ - _aliases.get(norm_encoding.replace('.', '_')) - if aliased_encoding is not None: - modnames = [aliased_encoding, - norm_encoding] - else: - modnames = [norm_encoding] - for modname in modnames: - if not modname or '.' in modname: - continue - try: - # Import is absolute to prevent the possibly malicious import of a - # module with side-effects that is not in the 'encodings' package. - mod = __import__('encodings.' 
+ modname, fromlist=_import_tail, - level=0) - except ImportError: - pass - else: - break - else: - mod = None - - try: - getregentry = mod.getregentry - except AttributeError: - # Not a codec module - mod = None - - if mod is None: - # Cache misses - _cache[encoding] = None - return None - - # Now ask the module for the registry entry - entry = getregentry() - if not isinstance(entry, codecs.CodecInfo): - if not 4 <= len(entry) <= 7: - raise CodecRegistryError,\ - 'module "%s" (%s) failed to register' % \ - (mod.__name__, mod.__file__) - if not callable(entry[0]) or \ - not callable(entry[1]) or \ - (entry[2] is not None and not callable(entry[2])) or \ - (entry[3] is not None and not callable(entry[3])) or \ - (len(entry) > 4 and entry[4] is not None and not callable(entry[4])) or \ - (len(entry) > 5 and entry[5] is not None and not callable(entry[5])): - raise CodecRegistryError,\ - 'incompatible codecs in module "%s" (%s)' % \ - (mod.__name__, mod.__file__) - if len(entry)<7 or entry[6] is None: - entry += (None,)*(6-len(entry)) + (mod.__name__.split(".", 1)[1],) - entry = codecs.CodecInfo(*entry) - - # Cache the codec registry entry - _cache[encoding] = entry - - # Register its aliases (without overwriting previously registered - # aliases) - try: - codecaliases = mod.getaliases() - except AttributeError: - pass - else: - for alias in codecaliases: - if not _aliases.has_key(alias): - _aliases[alias] = modname - - # Return the registry entry - return entry - -# Register the search_function in the Python codec registry -codecs.register(search_function) diff --git a/icarus-miner/data/usr/lib/python2.6/__init__.pyc b/icarus-miner/data/usr/lib/python2.6/__init__.pyc deleted file mode 100644 index f16af0f..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/__init__.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/_abcoll.py b/icarus-miner/data/usr/lib/python2.6/_abcoll.py deleted file mode 100644 index d8cdc98..0000000 --- a/icarus-miner/data/usr/lib/python2.6/_abcoll.py +++ /dev/null @@ -1,562 +0,0 @@ -# Copyright 2007 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Abstract Base Classes (ABCs) for collections, according to PEP 3119. - -DON'T USE THIS MODULE DIRECTLY! The classes here should be imported -via collections; they are defined here only to alleviate certain -bootstrapping issues. Unit tests are in test_collections. 
-""" - -from abc import ABCMeta, abstractmethod -import sys - -__all__ = ["Hashable", "Iterable", "Iterator", - "Sized", "Container", "Callable", - "Set", "MutableSet", - "Mapping", "MutableMapping", - "MappingView", "KeysView", "ItemsView", "ValuesView", - "Sequence", "MutableSequence", - ] - -### ONE-TRICK PONIES ### - -class Hashable: - __metaclass__ = ABCMeta - - @abstractmethod - def __hash__(self): - return 0 - - @classmethod - def __subclasshook__(cls, C): - if cls is Hashable: - for B in C.__mro__: - if "__hash__" in B.__dict__: - if B.__dict__["__hash__"]: - return True - break - return NotImplemented - - -class Iterable: - __metaclass__ = ABCMeta - - @abstractmethod - def __iter__(self): - while False: - yield None - - @classmethod - def __subclasshook__(cls, C): - if cls is Iterable: - if any("__iter__" in B.__dict__ for B in C.__mro__): - return True - return NotImplemented - -Iterable.register(str) - - -class Iterator(Iterable): - - @abstractmethod - def next(self): - raise StopIteration - - def __iter__(self): - return self - - @classmethod - def __subclasshook__(cls, C): - if cls is Iterator: - if any("next" in B.__dict__ for B in C.__mro__): - return True - return NotImplemented - - -class Sized: - __metaclass__ = ABCMeta - - @abstractmethod - def __len__(self): - return 0 - - @classmethod - def __subclasshook__(cls, C): - if cls is Sized: - if any("__len__" in B.__dict__ for B in C.__mro__): - return True - return NotImplemented - - -class Container: - __metaclass__ = ABCMeta - - @abstractmethod - def __contains__(self, x): - return False - - @classmethod - def __subclasshook__(cls, C): - if cls is Container: - if any("__contains__" in B.__dict__ for B in C.__mro__): - return True - return NotImplemented - - -class Callable: - __metaclass__ = ABCMeta - - @abstractmethod - def __call__(self, *args, **kwds): - return False - - @classmethod - def __subclasshook__(cls, C): - if cls is Callable: - if any("__call__" in B.__dict__ for B in C.__mro__): - return True - return NotImplemented - - -### SETS ### - - -class Set(Sized, Iterable, Container): - """A set is a finite, iterable container. - - This class provides concrete generic implementations of all - methods except for __contains__, __iter__ and __len__. - - To override the comparisons (presumably for speed, as the - semantics are fixed), all you have to do is redefine __le__ and - then the other operations will automatically follow suit. - """ - - def __le__(self, other): - if not isinstance(other, Set): - return NotImplemented - if len(self) > len(other): - return False - for elem in self: - if elem not in other: - return False - return True - - def __lt__(self, other): - if not isinstance(other, Set): - return NotImplemented - return len(self) < len(other) and self.__le__(other) - - def __gt__(self, other): - if not isinstance(other, Set): - return NotImplemented - return other < self - - def __ge__(self, other): - if not isinstance(other, Set): - return NotImplemented - return other <= self - - def __eq__(self, other): - if not isinstance(other, Set): - return NotImplemented - return len(self) == len(other) and self.__le__(other) - - def __ne__(self, other): - return not (self == other) - - @classmethod - def _from_iterable(cls, it): - '''Construct an instance of the class from any iterable input. - - Must override this method if the class constructor signature - does not accept an iterable for an input. 
- ''' - return cls(it) - - def __and__(self, other): - if not isinstance(other, Iterable): - return NotImplemented - return self._from_iterable(value for value in other if value in self) - - def isdisjoint(self, other): - for value in other: - if value in self: - return False - return True - - def __or__(self, other): - if not isinstance(other, Iterable): - return NotImplemented - chain = (e for s in (self, other) for e in s) - return self._from_iterable(chain) - - def __sub__(self, other): - if not isinstance(other, Set): - if not isinstance(other, Iterable): - return NotImplemented - other = self._from_iterable(other) - return self._from_iterable(value for value in self - if value not in other) - - def __xor__(self, other): - if not isinstance(other, Set): - if not isinstance(other, Iterable): - return NotImplemented - other = self._from_iterable(other) - return (self - other) | (other - self) - - # Sets are not hashable by default, but subclasses can change this - __hash__ = None - - def _hash(self): - """Compute the hash value of a set. - - Note that we don't define __hash__: not all sets are hashable. - But if you define a hashable set type, its __hash__ should - call this function. - - This must be compatible __eq__. - - All sets ought to compare equal if they contain the same - elements, regardless of how they are implemented, and - regardless of the order of the elements; so there's not much - freedom for __eq__ or __hash__. We match the algorithm used - by the built-in frozenset type. - """ - MAX = sys.maxint - MASK = 2 * MAX + 1 - n = len(self) - h = 1927868237 * (n + 1) - h &= MASK - for x in self: - hx = hash(x) - h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167 - h &= MASK - h = h * 69069 + 907133923 - h &= MASK - if h > MAX: - h -= MASK + 1 - if h == -1: - h = 590923713 - return h - -Set.register(frozenset) - - -class MutableSet(Set): - - @abstractmethod - def add(self, value): - """Add an element.""" - raise NotImplementedError - - @abstractmethod - def discard(self, value): - """Remove an element. Do not raise an exception if absent.""" - raise NotImplementedError - - def remove(self, value): - """Remove an element. If not a member, raise a KeyError.""" - if value not in self: - raise KeyError(value) - self.discard(value) - - def pop(self): - """Return the popped value. Raise KeyError if empty.""" - it = iter(self) - try: - value = next(it) - except StopIteration: - raise KeyError - self.discard(value) - return value - - def clear(self): - """This is slow (creates N new iterators!) 
but effective.""" - try: - while True: - self.pop() - except KeyError: - pass - - def __ior__(self, it): - for value in it: - self.add(value) - return self - - def __iand__(self, it): - for value in (self - it): - self.discard(value) - return self - - def __ixor__(self, it): - if not isinstance(it, Set): - it = self._from_iterable(it) - for value in it: - if value in self: - self.discard(value) - else: - self.add(value) - return self - - def __isub__(self, it): - for value in it: - self.discard(value) - return self - -MutableSet.register(set) - - -### MAPPINGS ### - - -class Mapping(Sized, Iterable, Container): - - @abstractmethod - def __getitem__(self, key): - raise KeyError - - def get(self, key, default=None): - try: - return self[key] - except KeyError: - return default - - def __contains__(self, key): - try: - self[key] - except KeyError: - return False - else: - return True - - def iterkeys(self): - return iter(self) - - def itervalues(self): - for key in self: - yield self[key] - - def iteritems(self): - for key in self: - yield (key, self[key]) - - def keys(self): - return list(self) - - def items(self): - return [(key, self[key]) for key in self] - - def values(self): - return [self[key] for key in self] - - # Mappings are not hashable by default, but subclasses can change this - __hash__ = None - - def __eq__(self, other): - return isinstance(other, Mapping) and \ - dict(self.items()) == dict(other.items()) - - def __ne__(self, other): - return not (self == other) - -class MappingView(Sized): - - def __init__(self, mapping): - self._mapping = mapping - - def __len__(self): - return len(self._mapping) - - -class KeysView(MappingView, Set): - - def __contains__(self, key): - return key in self._mapping - - def __iter__(self): - for key in self._mapping: - yield key - - -class ItemsView(MappingView, Set): - - def __contains__(self, item): - key, value = item - try: - v = self._mapping[key] - except KeyError: - return False - else: - return v == value - - def __iter__(self): - for key in self._mapping: - yield (key, self._mapping[key]) - - -class ValuesView(MappingView): - - def __contains__(self, value): - for key in self._mapping: - if value == self._mapping[key]: - return True - return False - - def __iter__(self): - for key in self._mapping: - yield self._mapping[key] - - -class MutableMapping(Mapping): - - @abstractmethod - def __setitem__(self, key, value): - raise KeyError - - @abstractmethod - def __delitem__(self, key): - raise KeyError - - __marker = object() - - def pop(self, key, default=__marker): - try: - value = self[key] - except KeyError: - if default is self.__marker: - raise - return default - else: - del self[key] - return value - - def popitem(self): - try: - key = next(iter(self)) - except StopIteration: - raise KeyError - value = self[key] - del self[key] - return key, value - - def clear(self): - try: - while True: - self.popitem() - except KeyError: - pass - - def update(self, other=(), **kwds): - if isinstance(other, Mapping): - for key in other: - self[key] = other[key] - elif hasattr(other, "keys"): - for key in other.keys(): - self[key] = other[key] - else: - for key, value in other: - self[key] = value - for key, value in kwds.items(): - self[key] = value - - def setdefault(self, key, default=None): - try: - return self[key] - except KeyError: - self[key] = default - return default - -MutableMapping.register(dict) - - -### SEQUENCES ### - - -class Sequence(Sized, Iterable, Container): - """All the operations on a read-only sequence. 
- - Concrete subclasses must override __new__ or __init__, - __getitem__, and __len__. - """ - - @abstractmethod - def __getitem__(self, index): - raise IndexError - - def __iter__(self): - i = 0 - try: - while True: - v = self[i] - yield v - i += 1 - except IndexError: - return - - def __contains__(self, value): - for v in self: - if v == value: - return True - return False - - def __reversed__(self): - for i in reversed(range(len(self))): - yield self[i] - - def index(self, value): - for i, v in enumerate(self): - if v == value: - return i - raise ValueError - - def count(self, value): - return sum(1 for v in self if v == value) - -Sequence.register(tuple) -Sequence.register(basestring) -Sequence.register(buffer) -Sequence.register(xrange) - - -class MutableSequence(Sequence): - - @abstractmethod - def __setitem__(self, index, value): - raise IndexError - - @abstractmethod - def __delitem__(self, index): - raise IndexError - - @abstractmethod - def insert(self, index, value): - raise IndexError - - def append(self, value): - self.insert(len(self), value) - - def reverse(self): - n = len(self) - for i in range(n//2): - self[i], self[n-i-1] = self[n-i-1], self[i] - - def extend(self, values): - for v in values: - self.append(v) - - def pop(self, index=-1): - v = self[index] - del self[index] - return v - - def remove(self, value): - del self[self.index(value)] - - def __iadd__(self, values): - self.extend(values) - return self - -MutableSequence.register(list) diff --git a/icarus-miner/data/usr/lib/python2.6/_abcoll.pyc b/icarus-miner/data/usr/lib/python2.6/_abcoll.pyc deleted file mode 100644 index 8795e1e..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/_abcoll.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/abc.py b/icarus-miner/data/usr/lib/python2.6/abc.py deleted file mode 100644 index 95126d8..0000000 --- a/icarus-miner/data/usr/lib/python2.6/abc.py +++ /dev/null @@ -1,174 +0,0 @@ -# Copyright 2007 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Abstract Base Classes (ABCs) according to PEP 3119.""" - - -def abstractmethod(funcobj): - """A decorator indicating abstract methods. - - Requires that the metaclass is ABCMeta or derived from it. A - class that has a metaclass derived from ABCMeta cannot be - instantiated unless all of its abstract methods are overridden. - The abstract methods can be called using any of the normal - 'super' call mechanisms. - - Usage: - - class C: - __metaclass__ = ABCMeta - @abstractmethod - def my_abstract_method(self, ...): - ... - """ - funcobj.__isabstractmethod__ = True - return funcobj - - -class abstractproperty(property): - """A decorator indicating abstract properties. - - Requires that the metaclass is ABCMeta or derived from it. A - class that has a metaclass derived from ABCMeta cannot be - instantiated unless all of its abstract properties are overridden. - The abstract properties can be called using any of the normal - 'super' call mechanisms. - - Usage: - - class C: - __metaclass__ = ABCMeta - @abstractproperty - def my_abstract_property(self): - ... - - This defines a read-only property; you can also define a read-write - abstract property using the 'long' form of property declaration: - - class C: - __metaclass__ = ABCMeta - def getx(self): ... - def setx(self, value): ... - x = abstractproperty(getx, setx) - """ - __isabstractmethod__ = True - - -class ABCMeta(type): - - """Metaclass for defining Abstract Base Classes (ABCs). 
- - Use this metaclass to create an ABC. An ABC can be subclassed - directly, and then acts as a mix-in class. You can also register - unrelated concrete classes (even built-in classes) and unrelated - ABCs as 'virtual subclasses' -- these and their descendants will - be considered subclasses of the registering ABC by the built-in - issubclass() function, but the registering ABC won't show up in - their MRO (Method Resolution Order) nor will method - implementations defined by the registering ABC be callable (not - even via super()). - - """ - - # A global counter that is incremented each time a class is - # registered as a virtual subclass of anything. It forces the - # negative cache to be cleared before its next use. - _abc_invalidation_counter = 0 - - def __new__(mcls, name, bases, namespace): - cls = super(ABCMeta, mcls).__new__(mcls, name, bases, namespace) - # Compute set of abstract method names - abstracts = set(name - for name, value in namespace.items() - if getattr(value, "__isabstractmethod__", False)) - for base in bases: - for name in getattr(base, "__abstractmethods__", set()): - value = getattr(cls, name, None) - if getattr(value, "__isabstractmethod__", False): - abstracts.add(name) - cls.__abstractmethods__ = frozenset(abstracts) - # Set up inheritance registry - cls._abc_registry = set() - cls._abc_cache = set() - cls._abc_negative_cache = set() - cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter - return cls - - def register(cls, subclass): - """Register a virtual subclass of an ABC.""" - if not isinstance(cls, type): - raise TypeError("Can only register classes") - if issubclass(subclass, cls): - return # Already a subclass - # Subtle: test for cycles *after* testing for "already a subclass"; - # this means we allow X.register(X) and interpret it as a no-op. - if issubclass(cls, subclass): - # This would create a cycle, which is bad for the algorithm below - raise RuntimeError("Refusing to create an inheritance cycle") - cls._abc_registry.add(subclass) - ABCMeta._abc_invalidation_counter += 1 # Invalidate negative cache - - def _dump_registry(cls, file=None): - """Debug helper to print the ABC registry.""" - print >> file, "Class: %s.%s" % (cls.__module__, cls.__name__) - print >> file, "Inv.counter: %s" % ABCMeta._abc_invalidation_counter - for name in sorted(cls.__dict__.keys()): - if name.startswith("_abc_"): - value = getattr(cls, name) - print >> file, "%s: %r" % (name, value) - - def __instancecheck__(cls, instance): - """Override for isinstance(instance, cls).""" - # Inline the cache checking when it's simple. - subclass = getattr(instance, '__class__', None) - if subclass in cls._abc_cache: - return True - subtype = type(instance) - if subtype is subclass or subclass is None: - if (cls._abc_negative_cache_version == - ABCMeta._abc_invalidation_counter and - subtype in cls._abc_negative_cache): - return False - # Fall back to the subclass check. 
- return cls.__subclasscheck__(subtype) - return (cls.__subclasscheck__(subclass) or - cls.__subclasscheck__(subtype)) - - def __subclasscheck__(cls, subclass): - """Override for issubclass(subclass, cls).""" - # Check cache - if subclass in cls._abc_cache: - return True - # Check negative cache; may have to invalidate - if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter: - # Invalidate the negative cache - cls._abc_negative_cache = set() - cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter - elif subclass in cls._abc_negative_cache: - return False - # Check the subclass hook - ok = cls.__subclasshook__(subclass) - if ok is not NotImplemented: - assert isinstance(ok, bool) - if ok: - cls._abc_cache.add(subclass) - else: - cls._abc_negative_cache.add(subclass) - return ok - # Check if it's a direct subclass - if cls in getattr(subclass, '__mro__', ()): - cls._abc_cache.add(subclass) - return True - # Check if it's a subclass of a registered class (recursive) - for rcls in cls._abc_registry: - if issubclass(subclass, rcls): - cls._abc_cache.add(subclass) - return True - # Check if it's a subclass of a subclass (recursive) - for scls in cls.__subclasses__(): - if issubclass(subclass, scls): - cls._abc_cache.add(subclass) - return True - # No dice; update negative cache - cls._abc_negative_cache.add(subclass) - return False diff --git a/icarus-miner/data/usr/lib/python2.6/abc.pyc b/icarus-miner/data/usr/lib/python2.6/abc.pyc deleted file mode 100644 index 30f8210..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/abc.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/aliases.py b/icarus-miner/data/usr/lib/python2.6/aliases.py deleted file mode 100644 index c6f5aeb..0000000 --- a/icarus-miner/data/usr/lib/python2.6/aliases.py +++ /dev/null @@ -1,522 +0,0 @@ -""" Encoding Aliases Support - - This module is used by the encodings package search function to - map encodings names to module names. - - Note that the search function normalizes the encoding names before - doing the lookup, so the mapping will have to map normalized - encoding names to module names. - - Contents: - - The following aliases dictionary contains mappings of all IANA - character set names for which the Python core library provides - codecs. In addition to these, a few Python specific codec - aliases have also been added. - -""" -aliases = { - - # Please keep this list sorted alphabetically by value ! 
- - # ascii codec - '646' : 'ascii', - 'ansi_x3.4_1968' : 'ascii', - 'ansi_x3_4_1968' : 'ascii', # some email headers use this non-standard name - 'ansi_x3.4_1986' : 'ascii', - 'cp367' : 'ascii', - 'csascii' : 'ascii', - 'ibm367' : 'ascii', - 'iso646_us' : 'ascii', - 'iso_646.irv_1991' : 'ascii', - 'iso_ir_6' : 'ascii', - 'us' : 'ascii', - 'us_ascii' : 'ascii', - - # base64_codec codec - 'base64' : 'base64_codec', - 'base_64' : 'base64_codec', - - # big5 codec - 'big5_tw' : 'big5', - 'csbig5' : 'big5', - - # big5hkscs codec - 'big5_hkscs' : 'big5hkscs', - 'hkscs' : 'big5hkscs', - - # bz2_codec codec - 'bz2' : 'bz2_codec', - - # cp037 codec - '037' : 'cp037', - 'csibm037' : 'cp037', - 'ebcdic_cp_ca' : 'cp037', - 'ebcdic_cp_nl' : 'cp037', - 'ebcdic_cp_us' : 'cp037', - 'ebcdic_cp_wt' : 'cp037', - 'ibm037' : 'cp037', - 'ibm039' : 'cp037', - - # cp1026 codec - '1026' : 'cp1026', - 'csibm1026' : 'cp1026', - 'ibm1026' : 'cp1026', - - # cp1140 codec - '1140' : 'cp1140', - 'ibm1140' : 'cp1140', - - # cp1250 codec - '1250' : 'cp1250', - 'windows_1250' : 'cp1250', - - # cp1251 codec - '1251' : 'cp1251', - 'windows_1251' : 'cp1251', - - # cp1252 codec - '1252' : 'cp1252', - 'windows_1252' : 'cp1252', - - # cp1253 codec - '1253' : 'cp1253', - 'windows_1253' : 'cp1253', - - # cp1254 codec - '1254' : 'cp1254', - 'windows_1254' : 'cp1254', - - # cp1255 codec - '1255' : 'cp1255', - 'windows_1255' : 'cp1255', - - # cp1256 codec - '1256' : 'cp1256', - 'windows_1256' : 'cp1256', - - # cp1257 codec - '1257' : 'cp1257', - 'windows_1257' : 'cp1257', - - # cp1258 codec - '1258' : 'cp1258', - 'windows_1258' : 'cp1258', - - # cp424 codec - '424' : 'cp424', - 'csibm424' : 'cp424', - 'ebcdic_cp_he' : 'cp424', - 'ibm424' : 'cp424', - - # cp437 codec - '437' : 'cp437', - 'cspc8codepage437' : 'cp437', - 'ibm437' : 'cp437', - - # cp500 codec - '500' : 'cp500', - 'csibm500' : 'cp500', - 'ebcdic_cp_be' : 'cp500', - 'ebcdic_cp_ch' : 'cp500', - 'ibm500' : 'cp500', - - # cp775 codec - '775' : 'cp775', - 'cspc775baltic' : 'cp775', - 'ibm775' : 'cp775', - - # cp850 codec - '850' : 'cp850', - 'cspc850multilingual' : 'cp850', - 'ibm850' : 'cp850', - - # cp852 codec - '852' : 'cp852', - 'cspcp852' : 'cp852', - 'ibm852' : 'cp852', - - # cp855 codec - '855' : 'cp855', - 'csibm855' : 'cp855', - 'ibm855' : 'cp855', - - # cp857 codec - '857' : 'cp857', - 'csibm857' : 'cp857', - 'ibm857' : 'cp857', - - # cp860 codec - '860' : 'cp860', - 'csibm860' : 'cp860', - 'ibm860' : 'cp860', - - # cp861 codec - '861' : 'cp861', - 'cp_is' : 'cp861', - 'csibm861' : 'cp861', - 'ibm861' : 'cp861', - - # cp862 codec - '862' : 'cp862', - 'cspc862latinhebrew' : 'cp862', - 'ibm862' : 'cp862', - - # cp863 codec - '863' : 'cp863', - 'csibm863' : 'cp863', - 'ibm863' : 'cp863', - - # cp864 codec - '864' : 'cp864', - 'csibm864' : 'cp864', - 'ibm864' : 'cp864', - - # cp865 codec - '865' : 'cp865', - 'csibm865' : 'cp865', - 'ibm865' : 'cp865', - - # cp866 codec - '866' : 'cp866', - 'csibm866' : 'cp866', - 'ibm866' : 'cp866', - - # cp869 codec - '869' : 'cp869', - 'cp_gr' : 'cp869', - 'csibm869' : 'cp869', - 'ibm869' : 'cp869', - - # cp932 codec - '932' : 'cp932', - 'ms932' : 'cp932', - 'mskanji' : 'cp932', - 'ms_kanji' : 'cp932', - - # cp949 codec - '949' : 'cp949', - 'ms949' : 'cp949', - 'uhc' : 'cp949', - - # cp950 codec - '950' : 'cp950', - 'ms950' : 'cp950', - - # euc_jis_2004 codec - 'jisx0213' : 'euc_jis_2004', - 'eucjis2004' : 'euc_jis_2004', - 'euc_jis2004' : 'euc_jis_2004', - - # euc_jisx0213 codec - 'eucjisx0213' : 'euc_jisx0213', - - # euc_jp codec - 
'eucjp' : 'euc_jp', - 'ujis' : 'euc_jp', - 'u_jis' : 'euc_jp', - - # euc_kr codec - 'euckr' : 'euc_kr', - 'korean' : 'euc_kr', - 'ksc5601' : 'euc_kr', - 'ks_c_5601' : 'euc_kr', - 'ks_c_5601_1987' : 'euc_kr', - 'ksx1001' : 'euc_kr', - 'ks_x_1001' : 'euc_kr', - - # gb18030 codec - 'gb18030_2000' : 'gb18030', - - # gb2312 codec - 'chinese' : 'gb2312', - 'csiso58gb231280' : 'gb2312', - 'euc_cn' : 'gb2312', - 'euccn' : 'gb2312', - 'eucgb2312_cn' : 'gb2312', - 'gb2312_1980' : 'gb2312', - 'gb2312_80' : 'gb2312', - 'iso_ir_58' : 'gb2312', - - # gbk codec - '936' : 'gbk', - 'cp936' : 'gbk', - 'ms936' : 'gbk', - - # hex_codec codec - 'hex' : 'hex_codec', - - # hp_roman8 codec - 'roman8' : 'hp_roman8', - 'r8' : 'hp_roman8', - 'csHPRoman8' : 'hp_roman8', - - # hz codec - 'hzgb' : 'hz', - 'hz_gb' : 'hz', - 'hz_gb_2312' : 'hz', - - # iso2022_jp codec - 'csiso2022jp' : 'iso2022_jp', - 'iso2022jp' : 'iso2022_jp', - 'iso_2022_jp' : 'iso2022_jp', - - # iso2022_jp_1 codec - 'iso2022jp_1' : 'iso2022_jp_1', - 'iso_2022_jp_1' : 'iso2022_jp_1', - - # iso2022_jp_2 codec - 'iso2022jp_2' : 'iso2022_jp_2', - 'iso_2022_jp_2' : 'iso2022_jp_2', - - # iso2022_jp_2004 codec - 'iso_2022_jp_2004' : 'iso2022_jp_2004', - 'iso2022jp_2004' : 'iso2022_jp_2004', - - # iso2022_jp_3 codec - 'iso2022jp_3' : 'iso2022_jp_3', - 'iso_2022_jp_3' : 'iso2022_jp_3', - - # iso2022_jp_ext codec - 'iso2022jp_ext' : 'iso2022_jp_ext', - 'iso_2022_jp_ext' : 'iso2022_jp_ext', - - # iso2022_kr codec - 'csiso2022kr' : 'iso2022_kr', - 'iso2022kr' : 'iso2022_kr', - 'iso_2022_kr' : 'iso2022_kr', - - # iso8859_10 codec - 'csisolatin6' : 'iso8859_10', - 'iso_8859_10' : 'iso8859_10', - 'iso_8859_10_1992' : 'iso8859_10', - 'iso_ir_157' : 'iso8859_10', - 'l6' : 'iso8859_10', - 'latin6' : 'iso8859_10', - - # iso8859_11 codec - 'thai' : 'iso8859_11', - 'iso_8859_11' : 'iso8859_11', - 'iso_8859_11_2001' : 'iso8859_11', - - # iso8859_13 codec - 'iso_8859_13' : 'iso8859_13', - 'l7' : 'iso8859_13', - 'latin7' : 'iso8859_13', - - # iso8859_14 codec - 'iso_8859_14' : 'iso8859_14', - 'iso_8859_14_1998' : 'iso8859_14', - 'iso_celtic' : 'iso8859_14', - 'iso_ir_199' : 'iso8859_14', - 'l8' : 'iso8859_14', - 'latin8' : 'iso8859_14', - - # iso8859_15 codec - 'iso_8859_15' : 'iso8859_15', - 'l9' : 'iso8859_15', - 'latin9' : 'iso8859_15', - - # iso8859_16 codec - 'iso_8859_16' : 'iso8859_16', - 'iso_8859_16_2001' : 'iso8859_16', - 'iso_ir_226' : 'iso8859_16', - 'l10' : 'iso8859_16', - 'latin10' : 'iso8859_16', - - # iso8859_2 codec - 'csisolatin2' : 'iso8859_2', - 'iso_8859_2' : 'iso8859_2', - 'iso_8859_2_1987' : 'iso8859_2', - 'iso_ir_101' : 'iso8859_2', - 'l2' : 'iso8859_2', - 'latin2' : 'iso8859_2', - - # iso8859_3 codec - 'csisolatin3' : 'iso8859_3', - 'iso_8859_3' : 'iso8859_3', - 'iso_8859_3_1988' : 'iso8859_3', - 'iso_ir_109' : 'iso8859_3', - 'l3' : 'iso8859_3', - 'latin3' : 'iso8859_3', - - # iso8859_4 codec - 'csisolatin4' : 'iso8859_4', - 'iso_8859_4' : 'iso8859_4', - 'iso_8859_4_1988' : 'iso8859_4', - 'iso_ir_110' : 'iso8859_4', - 'l4' : 'iso8859_4', - 'latin4' : 'iso8859_4', - - # iso8859_5 codec - 'csisolatincyrillic' : 'iso8859_5', - 'cyrillic' : 'iso8859_5', - 'iso_8859_5' : 'iso8859_5', - 'iso_8859_5_1988' : 'iso8859_5', - 'iso_ir_144' : 'iso8859_5', - - # iso8859_6 codec - 'arabic' : 'iso8859_6', - 'asmo_708' : 'iso8859_6', - 'csisolatinarabic' : 'iso8859_6', - 'ecma_114' : 'iso8859_6', - 'iso_8859_6' : 'iso8859_6', - 'iso_8859_6_1987' : 'iso8859_6', - 'iso_ir_127' : 'iso8859_6', - - # iso8859_7 codec - 'csisolatingreek' : 'iso8859_7', - 'ecma_118' : 
'iso8859_7', - 'elot_928' : 'iso8859_7', - 'greek' : 'iso8859_7', - 'greek8' : 'iso8859_7', - 'iso_8859_7' : 'iso8859_7', - 'iso_8859_7_1987' : 'iso8859_7', - 'iso_ir_126' : 'iso8859_7', - - # iso8859_8 codec - 'csisolatinhebrew' : 'iso8859_8', - 'hebrew' : 'iso8859_8', - 'iso_8859_8' : 'iso8859_8', - 'iso_8859_8_1988' : 'iso8859_8', - 'iso_ir_138' : 'iso8859_8', - - # iso8859_9 codec - 'csisolatin5' : 'iso8859_9', - 'iso_8859_9' : 'iso8859_9', - 'iso_8859_9_1989' : 'iso8859_9', - 'iso_ir_148' : 'iso8859_9', - 'l5' : 'iso8859_9', - 'latin5' : 'iso8859_9', - - # johab codec - 'cp1361' : 'johab', - 'ms1361' : 'johab', - - # koi8_r codec - 'cskoi8r' : 'koi8_r', - - # latin_1 codec - # - # Note that the latin_1 codec is implemented internally in C and a - # lot faster than the charmap codec iso8859_1 which uses the same - # encoding. This is why we discourage the use of the iso8859_1 - # codec and alias it to latin_1 instead. - # - '8859' : 'latin_1', - 'cp819' : 'latin_1', - 'csisolatin1' : 'latin_1', - 'ibm819' : 'latin_1', - 'iso8859' : 'latin_1', - 'iso8859_1' : 'latin_1', - 'iso_8859_1' : 'latin_1', - 'iso_8859_1_1987' : 'latin_1', - 'iso_ir_100' : 'latin_1', - 'l1' : 'latin_1', - 'latin' : 'latin_1', - 'latin1' : 'latin_1', - - # mac_cyrillic codec - 'maccyrillic' : 'mac_cyrillic', - - # mac_greek codec - 'macgreek' : 'mac_greek', - - # mac_iceland codec - 'maciceland' : 'mac_iceland', - - # mac_latin2 codec - 'maccentraleurope' : 'mac_latin2', - 'maclatin2' : 'mac_latin2', - - # mac_roman codec - 'macroman' : 'mac_roman', - - # mac_turkish codec - 'macturkish' : 'mac_turkish', - - # mbcs codec - 'dbcs' : 'mbcs', - - # ptcp154 codec - 'csptcp154' : 'ptcp154', - 'pt154' : 'ptcp154', - 'cp154' : 'ptcp154', - 'cyrillic-asian' : 'ptcp154', - - # quopri_codec codec - 'quopri' : 'quopri_codec', - 'quoted_printable' : 'quopri_codec', - 'quotedprintable' : 'quopri_codec', - - # rot_13 codec - 'rot13' : 'rot_13', - - # shift_jis codec - 'csshiftjis' : 'shift_jis', - 'shiftjis' : 'shift_jis', - 'sjis' : 'shift_jis', - 's_jis' : 'shift_jis', - - # shift_jis_2004 codec - 'shiftjis2004' : 'shift_jis_2004', - 'sjis_2004' : 'shift_jis_2004', - 's_jis_2004' : 'shift_jis_2004', - - # shift_jisx0213 codec - 'shiftjisx0213' : 'shift_jisx0213', - 'sjisx0213' : 'shift_jisx0213', - 's_jisx0213' : 'shift_jisx0213', - - # tactis codec - 'tis260' : 'tactis', - - # tis_620 codec - 'tis620' : 'tis_620', - 'tis_620_0' : 'tis_620', - 'tis_620_2529_0' : 'tis_620', - 'tis_620_2529_1' : 'tis_620', - 'iso_ir_166' : 'tis_620', - - # utf_16 codec - 'u16' : 'utf_16', - 'utf16' : 'utf_16', - - # utf_16_be codec - 'unicodebigunmarked' : 'utf_16_be', - 'utf_16be' : 'utf_16_be', - - # utf_16_le codec - 'unicodelittleunmarked' : 'utf_16_le', - 'utf_16le' : 'utf_16_le', - - # utf_32 codec - 'u32' : 'utf_32', - 'utf32' : 'utf_32', - - # utf_32_be codec - 'utf_32be' : 'utf_32_be', - - # utf_32_le codec - 'utf_32le' : 'utf_32_le', - - # utf_7 codec - 'u7' : 'utf_7', - 'utf7' : 'utf_7', - 'unicode_1_1_utf_7' : 'utf_7', - - # utf_8 codec - 'u8' : 'utf_8', - 'utf' : 'utf_8', - 'utf8' : 'utf_8', - 'utf8_ucs2' : 'utf_8', - 'utf8_ucs4' : 'utf_8', - - # uu_codec codec - 'uu' : 'uu_codec', - - # zlib_codec codec - 'zip' : 'zlib_codec', - 'zlib' : 'zlib_codec', - -} diff --git a/icarus-miner/data/usr/lib/python2.6/aliases.pyc b/icarus-miner/data/usr/lib/python2.6/aliases.pyc deleted file mode 100644 index 39f77e5..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/aliases.pyc and /dev/null differ diff --git 
a/icarus-miner/data/usr/lib/python2.6/ascii.py b/icarus-miner/data/usr/lib/python2.6/ascii.py deleted file mode 100644 index 2033cde..0000000 --- a/icarus-miner/data/usr/lib/python2.6/ascii.py +++ /dev/null @@ -1,50 +0,0 @@ -""" Python 'ascii' Codec - - -Written by Marc-Andre Lemburg (mal@lemburg.com). - -(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. - -""" -import codecs - -### Codec APIs - -class Codec(codecs.Codec): - - # Note: Binding these as C functions will result in the class not - # converting them to methods. This is intended. - encode = codecs.ascii_encode - decode = codecs.ascii_decode - -class IncrementalEncoder(codecs.IncrementalEncoder): - def encode(self, input, final=False): - return codecs.ascii_encode(input, self.errors)[0] - -class IncrementalDecoder(codecs.IncrementalDecoder): - def decode(self, input, final=False): - return codecs.ascii_decode(input, self.errors)[0] - -class StreamWriter(Codec,codecs.StreamWriter): - pass - -class StreamReader(Codec,codecs.StreamReader): - pass - -class StreamConverter(StreamWriter,StreamReader): - - encode = codecs.ascii_decode - decode = codecs.ascii_encode - -### encodings module API - -def getregentry(): - return codecs.CodecInfo( - name='ascii', - encode=Codec.encode, - decode=Codec.decode, - incrementalencoder=IncrementalEncoder, - incrementaldecoder=IncrementalDecoder, - streamwriter=StreamWriter, - streamreader=StreamReader, - ) diff --git a/icarus-miner/data/usr/lib/python2.6/ascii.pyc b/icarus-miner/data/usr/lib/python2.6/ascii.pyc deleted file mode 100644 index d71d520..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/ascii.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/base64.py b/icarus-miner/data/usr/lib/python2.6/base64.py deleted file mode 100755 index 41a5e14..0000000 --- a/icarus-miner/data/usr/lib/python2.6/base64.py +++ /dev/null @@ -1,359 +0,0 @@ -#! /usr/bin/env python - -"""RFC 3548: Base16, Base32, Base64 Data Encodings""" - -# Modified 04-Oct-1995 by Jack Jansen to use binascii module -# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support - -import re -import struct -import binascii - - -__all__ = [ - # Legacy interface exports traditional RFC 1521 Base64 encodings - 'encode', 'decode', 'encodestring', 'decodestring', - # Generalized interface for other encodings - 'b64encode', 'b64decode', 'b32encode', 'b32decode', - 'b16encode', 'b16decode', - # Standard Base64 encoding - 'standard_b64encode', 'standard_b64decode', - # Some common Base64 alternatives. As referenced by RFC 3458, see thread - # starting at: - # - # http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html - 'urlsafe_b64encode', 'urlsafe_b64decode', - ] - -_translation = [chr(_x) for _x in range(256)] -EMPTYSTRING = '' - - -def _translate(s, altchars): - translation = _translation[:] - for k, v in altchars.items(): - translation[ord(k)] = v - return s.translate(''.join(translation)) - - - -# Base64 encoding/decoding uses binascii - -def b64encode(s, altchars=None): - """Encode a string using Base64. - - s is the string to encode. Optional altchars must be a string of at least - length 2 (additional characters are ignored) which specifies an - alternative alphabet for the '+' and '/' characters. This allows an - application to e.g. generate url or filesystem safe Base64 strings. - - The encoded string is returned. 
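ascii.py above is a complete example of the codec protocol: getregentry() hands the encodings package a CodecInfo, and the aliases table from aliases.py decides which spellings reach it. A two-line sketch of the user-visible effect (illustrative only):

import codecs

assert u'abc'.encode('ascii') == 'abc'
assert codecs.lookup('646').name == 'ascii'   # '646' resolves via the aliases table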
- """ - # Strip off the trailing newline - encoded = binascii.b2a_base64(s)[:-1] - if altchars is not None: - return _translate(encoded, {'+': altchars[0], '/': altchars[1]}) - return encoded - - -def b64decode(s, altchars=None): - """Decode a Base64 encoded string. - - s is the string to decode. Optional altchars must be a string of at least - length 2 (additional characters are ignored) which specifies the - alternative alphabet used instead of the '+' and '/' characters. - - The decoded string is returned. A TypeError is raised if s were - incorrectly padded or if there are non-alphabet characters present in the - string. - """ - if altchars is not None: - s = _translate(s, {altchars[0]: '+', altchars[1]: '/'}) - try: - return binascii.a2b_base64(s) - except binascii.Error, msg: - # Transform this exception for consistency - raise TypeError(msg) - - -def standard_b64encode(s): - """Encode a string using the standard Base64 alphabet. - - s is the string to encode. The encoded string is returned. - """ - return b64encode(s) - -def standard_b64decode(s): - """Decode a string encoded with the standard Base64 alphabet. - - s is the string to decode. The decoded string is returned. A TypeError - is raised if the string is incorrectly padded or if there are non-alphabet - characters present in the string. - """ - return b64decode(s) - -def urlsafe_b64encode(s): - """Encode a string using a url-safe Base64 alphabet. - - s is the string to encode. The encoded string is returned. The alphabet - uses '-' instead of '+' and '_' instead of '/'. - """ - return b64encode(s, '-_') - -def urlsafe_b64decode(s): - """Decode a string encoded with the standard Base64 alphabet. - - s is the string to decode. The decoded string is returned. A TypeError - is raised if the string is incorrectly padded or if there are non-alphabet - characters present in the string. - - The alphabet uses '-' instead of '+' and '_' instead of '/'. - """ - return b64decode(s, '-_') - - - -# Base32 encoding/decoding must be done in Python -_b32alphabet = { - 0: 'A', 9: 'J', 18: 'S', 27: '3', - 1: 'B', 10: 'K', 19: 'T', 28: '4', - 2: 'C', 11: 'L', 20: 'U', 29: '5', - 3: 'D', 12: 'M', 21: 'V', 30: '6', - 4: 'E', 13: 'N', 22: 'W', 31: '7', - 5: 'F', 14: 'O', 23: 'X', - 6: 'G', 15: 'P', 24: 'Y', - 7: 'H', 16: 'Q', 25: 'Z', - 8: 'I', 17: 'R', 26: '2', - } - -_b32tab = _b32alphabet.items() -_b32tab.sort() -_b32tab = [v for k, v in _b32tab] -_b32rev = dict([(v, long(k)) for k, v in _b32alphabet.items()]) - - -def b32encode(s): - """Encode a string using Base32. - - s is the string to encode. The encoded string is returned. - """ - parts = [] - quanta, leftover = divmod(len(s), 5) - # Pad the last quantum with zero bits if necessary - if leftover: - s += ('\0' * (5 - leftover)) - quanta += 1 - for i in range(quanta): - # c1 and c2 are 16 bits wide, c3 is 8 bits wide. The intent of this - # code is to process the 40 bits in units of 5 bits. So we take the 1 - # leftover bit of c1 and tack it onto c2. Then we take the 2 leftover - # bits of c2 and tack them onto c3. The shifts and masks are intended - # to give us values of exactly 5 bits in width. 
- c1, c2, c3 = struct.unpack('!HHB', s[i*5:(i+1)*5]) - c2 += (c1 & 1) << 16 # 17 bits wide - c3 += (c2 & 3) << 8 # 10 bits wide - parts.extend([_b32tab[c1 >> 11], # bits 1 - 5 - _b32tab[(c1 >> 6) & 0x1f], # bits 6 - 10 - _b32tab[(c1 >> 1) & 0x1f], # bits 11 - 15 - _b32tab[c2 >> 12], # bits 16 - 20 (1 - 5) - _b32tab[(c2 >> 7) & 0x1f], # bits 21 - 25 (6 - 10) - _b32tab[(c2 >> 2) & 0x1f], # bits 26 - 30 (11 - 15) - _b32tab[c3 >> 5], # bits 31 - 35 (1 - 5) - _b32tab[c3 & 0x1f], # bits 36 - 40 (1 - 5) - ]) - encoded = EMPTYSTRING.join(parts) - # Adjust for any leftover partial quanta - if leftover == 1: - return encoded[:-6] + '======' - elif leftover == 2: - return encoded[:-4] + '====' - elif leftover == 3: - return encoded[:-3] + '===' - elif leftover == 4: - return encoded[:-1] + '=' - return encoded - - -def b32decode(s, casefold=False, map01=None): - """Decode a Base32 encoded string. - - s is the string to decode. Optional casefold is a flag specifying whether - a lowercase alphabet is acceptable as input. For security purposes, the - default is False. - - RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O - (oh), and for optional mapping of the digit 1 (one) to either the letter I - (eye) or letter L (el). The optional argument map01 when not None, - specifies which letter the digit 1 should be mapped to (when map01 is not - None, the digit 0 is always mapped to the letter O). For security - purposes the default is None, so that 0 and 1 are not allowed in the - input. - - The decoded string is returned. A TypeError is raised if s were - incorrectly padded or if there are non-alphabet characters present in the - string. - """ - quanta, leftover = divmod(len(s), 8) - if leftover: - raise TypeError('Incorrect padding') - # Handle section 2.4 zero and one mapping. The flag map01 will be either - # False, or the character to map the digit 1 (one) to. It should be - # either L (el) or I (eye). - if map01: - s = _translate(s, {'0': 'O', '1': map01}) - if casefold: - s = s.upper() - # Strip off pad characters from the right. We need to count the pad - # characters because this will tell us how many null bytes to remove from - # the end of the decoded string. - padchars = 0 - mo = re.search('(?P[=]*)$', s) - if mo: - padchars = len(mo.group('pad')) - if padchars > 0: - s = s[:-padchars] - # Now decode the full quanta - parts = [] - acc = 0 - shift = 35 - for c in s: - val = _b32rev.get(c) - if val is None: - raise TypeError('Non-base32 digit found') - acc += _b32rev[c] << shift - shift -= 5 - if shift < 0: - parts.append(binascii.unhexlify('%010x' % acc)) - acc = 0 - shift = 35 - # Process the last, partial quanta - last = binascii.unhexlify('%010x' % acc) - if padchars == 0: - last = '' # No characters - elif padchars == 1: - last = last[:-1] - elif padchars == 3: - last = last[:-2] - elif padchars == 4: - last = last[:-3] - elif padchars == 6: - last = last[:-4] - else: - raise TypeError('Incorrect padding') - parts.append(last) - return EMPTYSTRING.join(parts) - - - -# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns -# lowercase. The RFC also recommends against accepting input case -# insensitively. -def b16encode(s): - """Encode a string using Base16. - - s is the string to encode. The encoded string is returned. - """ - return binascii.hexlify(s).upper() - - -def b16decode(s, casefold=False): - """Decode a Base16 encoded string. - - s is the string to decode. 
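The leftover/padding arithmetic above works in 40-bit quanta, so a single input byte leaves six '=' pad characters, and casefold opts in to lowercase input. A sketch (illustrative only):

import base64

assert base64.b32encode('a') == 'ME======'          # 1 byte -> 6 pad chars
assert base64.b32encode('abcde') == 'MFRGGZDF'      # full quantum, no padding
assert base64.b32decode('me======', casefold=True) == 'a'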
Optional casefold is a flag specifying whether - a lowercase alphabet is acceptable as input. For security purposes, the - default is False. - - The decoded string is returned. A TypeError is raised if s were - incorrectly padded or if there are non-alphabet characters present in the - string. - """ - if casefold: - s = s.upper() - if re.search('[^0-9A-F]', s): - raise TypeError('Non-base16 digit found') - return binascii.unhexlify(s) - - - -# Legacy interface. This code could be cleaned up since I don't believe -# binascii has any line length limitations. It just doesn't seem worth it -# though. - -MAXLINESIZE = 76 # Excluding the CRLF -MAXBINSIZE = (MAXLINESIZE//4)*3 - -def encode(input, output): - """Encode a file.""" - while True: - s = input.read(MAXBINSIZE) - if not s: - break - while len(s) < MAXBINSIZE: - ns = input.read(MAXBINSIZE-len(s)) - if not ns: - break - s += ns - line = binascii.b2a_base64(s) - output.write(line) - - -def decode(input, output): - """Decode a file.""" - while True: - line = input.readline() - if not line: - break - s = binascii.a2b_base64(line) - output.write(s) - - -def encodestring(s): - """Encode a string into multiple lines of base-64 data.""" - pieces = [] - for i in range(0, len(s), MAXBINSIZE): - chunk = s[i : i + MAXBINSIZE] - pieces.append(binascii.b2a_base64(chunk)) - return "".join(pieces) - - -def decodestring(s): - """Decode a string.""" - return binascii.a2b_base64(s) - - - -# Useable as a script... -def test(): - """Small test program""" - import sys, getopt - try: - opts, args = getopt.getopt(sys.argv[1:], 'deut') - except getopt.error, msg: - sys.stdout = sys.stderr - print msg - print """usage: %s [-d|-e|-u|-t] [file|-] - -d, -u: decode - -e: encode (default) - -t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0] - sys.exit(2) - func = encode - for o, a in opts: - if o == '-e': func = encode - if o == '-d': func = decode - if o == '-u': func = decode - if o == '-t': test1(); return - if args and args[0] != '-': - func(open(args[0], 'rb'), sys.stdout) - else: - func(sys.stdin, sys.stdout) - - -def test1(): - s0 = "Aladdin:open sesame" - s1 = encodestring(s0) - s2 = decodestring(s1) - print s0, repr(s1), s2 - - -if __name__ == '__main__': - test() diff --git a/icarus-miner/data/usr/lib/python2.6/base64.pyc b/icarus-miner/data/usr/lib/python2.6/base64.pyc deleted file mode 100644 index 8045642..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/base64.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/bisect.py b/icarus-miner/data/usr/lib/python2.6/bisect.py deleted file mode 100644 index fe84255..0000000 --- a/icarus-miner/data/usr/lib/python2.6/bisect.py +++ /dev/null @@ -1,92 +0,0 @@ -"""Bisection algorithms.""" - -def insort_right(a, x, lo=0, hi=None): - """Insert item x in list a, and keep it sorted assuming a is sorted. - - If x is already in a, insert it to the right of the rightmost x. - - Optional args lo (default 0) and hi (default len(a)) bound the - slice of a to be searched. - """ - - if lo < 0: - raise ValueError('lo must be non-negative') - if hi is None: - hi = len(a) - while lo < hi: - mid = (lo+hi)//2 - if x < a[mid]: hi = mid - else: lo = mid+1 - a.insert(lo, x) - -insort = insort_right # backward compatibility - -def bisect_right(a, x, lo=0, hi=None): - """Return the index where to insert item x in list a, assuming a is sorted. - - The return value i is such that all e in a[:i] have e <= x, and all e in - a[i:] have e > x. 
So if x already appears in the list, a.insert(x) will - insert just after the rightmost x already there. - - Optional args lo (default 0) and hi (default len(a)) bound the - slice of a to be searched. - """ - - if lo < 0: - raise ValueError('lo must be non-negative') - if hi is None: - hi = len(a) - while lo < hi: - mid = (lo+hi)//2 - if x < a[mid]: hi = mid - else: lo = mid+1 - return lo - -bisect = bisect_right # backward compatibility - -def insort_left(a, x, lo=0, hi=None): - """Insert item x in list a, and keep it sorted assuming a is sorted. - - If x is already in a, insert it to the left of the leftmost x. - - Optional args lo (default 0) and hi (default len(a)) bound the - slice of a to be searched. - """ - - if lo < 0: - raise ValueError('lo must be non-negative') - if hi is None: - hi = len(a) - while lo < hi: - mid = (lo+hi)//2 - if a[mid] < x: lo = mid+1 - else: hi = mid - a.insert(lo, x) - - -def bisect_left(a, x, lo=0, hi=None): - """Return the index where to insert item x in list a, assuming a is sorted. - - The return value i is such that all e in a[:i] have e < x, and all e in - a[i:] have e >= x. So if x already appears in the list, a.insert(x) will - insert just before the leftmost x already there. - - Optional args lo (default 0) and hi (default len(a)) bound the - slice of a to be searched. - """ - - if lo < 0: - raise ValueError('lo must be non-negative') - if hi is None: - hi = len(a) - while lo < hi: - mid = (lo+hi)//2 - if a[mid] < x: lo = mid+1 - else: hi = mid - return lo - -# Overwrite above definitions with a fast C implementation -try: - from _bisect import bisect_right, bisect_left, insort_left, insort_right, insort, bisect -except ImportError: - pass diff --git a/icarus-miner/data/usr/lib/python2.6/bisect.pyc b/icarus-miner/data/usr/lib/python2.6/bisect.pyc deleted file mode 100644 index 7d6ebc5..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/bisect.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/codecs.py b/icarus-miner/data/usr/lib/python2.6/codecs.py deleted file mode 100644 index 557ccf7..0000000 --- a/icarus-miner/data/usr/lib/python2.6/codecs.py +++ /dev/null @@ -1,1082 +0,0 @@ -""" codecs -- Python Codec Registry, API and helpers. - - -Written by Marc-Andre Lemburg (mal@lemburg.com). - -(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. 
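The bisect_left/bisect_right contract spelled out in these docstrings is easiest to see with duplicates: bisect_left returns the first index with a[i] >= x, bisect_right the first with a[i] > x. A sketch with an illustrative list (not code from this package):

from bisect import bisect_left, bisect_right, insort

a = [1, 2, 2, 2, 3]
assert bisect_left(a, 2) == 1
assert bisect_right(a, 2) == 4
insort(a, 2)                  # insort is insort_right: inserts after equals
assert a == [1, 2, 2, 2, 2, 3]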
- -"""#" - -import __builtin__, sys - -### Registry and builtin stateless codec functions - -try: - from _codecs import * -except ImportError, why: - raise SystemError('Failed to load the builtin codecs: %s' % why) - -__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE", - "BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE", - "BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE", - "BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE", - "strict_errors", "ignore_errors", "replace_errors", - "xmlcharrefreplace_errors", - "register_error", "lookup_error"] - -### Constants - -# -# Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF) -# and its possible byte string values -# for UTF8/UTF16/UTF32 output and little/big endian machines -# - -# UTF-8 -BOM_UTF8 = '\xef\xbb\xbf' - -# UTF-16, little endian -BOM_LE = BOM_UTF16_LE = '\xff\xfe' - -# UTF-16, big endian -BOM_BE = BOM_UTF16_BE = '\xfe\xff' - -# UTF-32, little endian -BOM_UTF32_LE = '\xff\xfe\x00\x00' - -# UTF-32, big endian -BOM_UTF32_BE = '\x00\x00\xfe\xff' - -if sys.byteorder == 'little': - - # UTF-16, native endianness - BOM = BOM_UTF16 = BOM_UTF16_LE - - # UTF-32, native endianness - BOM_UTF32 = BOM_UTF32_LE - -else: - - # UTF-16, native endianness - BOM = BOM_UTF16 = BOM_UTF16_BE - - # UTF-32, native endianness - BOM_UTF32 = BOM_UTF32_BE - -# Old broken names (don't use in new code) -BOM32_LE = BOM_UTF16_LE -BOM32_BE = BOM_UTF16_BE -BOM64_LE = BOM_UTF32_LE -BOM64_BE = BOM_UTF32_BE - - -### Codec base classes (defining the API) - -class CodecInfo(tuple): - - def __new__(cls, encode, decode, streamreader=None, streamwriter=None, - incrementalencoder=None, incrementaldecoder=None, name=None): - self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter)) - self.name = name - self.encode = encode - self.decode = decode - self.incrementalencoder = incrementalencoder - self.incrementaldecoder = incrementaldecoder - self.streamwriter = streamwriter - self.streamreader = streamreader - return self - - def __repr__(self): - return "<%s.%s object for encoding %s at 0x%x>" % (self.__class__.__module__, self.__class__.__name__, self.name, id(self)) - -class Codec: - - """ Defines the interface for stateless encoders/decoders. - - The .encode()/.decode() methods may use different error - handling schemes by providing the errors argument. These - string values are predefined: - - 'strict' - raise a ValueError error (or a subclass) - 'ignore' - ignore the character and continue with the next - 'replace' - replace with a suitable replacement character; - Python will use the official U+FFFD REPLACEMENT - CHARACTER for the builtin Unicode codecs on - decoding and '?' on encoding. - 'xmlcharrefreplace' - Replace with the appropriate XML - character reference (only for encoding). - 'backslashreplace' - Replace with backslashed escape sequences - (only for encoding). - - The set of allowed values can be extended via register_error. - - """ - def encode(self, input, errors='strict'): - - """ Encodes the object input and returns a tuple (output - object, length consumed). - - errors defines the error handling to apply. It defaults to - 'strict' handling. - - The method may not store state in the Codec instance. Use - StreamCodec for codecs which have to keep state in order to - make encoding/decoding efficient. - - The encoder must be able to handle zero length input and - return an empty object of the output object type in this - situation. 
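The error-handling schemes listed in the Codec docstring above are reachable straight from the str/unicode methods; a sketch of each (Python 2 semantics, illustrative only):

assert u'caf\xe9'.encode('ascii', 'ignore') == 'caf'
assert u'caf\xe9'.encode('ascii', 'replace') == 'caf?'
assert u'caf\xe9'.encode('ascii', 'xmlcharrefreplace') == 'caf&#233;'
assert '\xff'.decode('utf-8', 'replace') == u'\ufffd'   # U+FFFD REPLACEMENT CHARACTER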
- - """ - raise NotImplementedError - - def decode(self, input, errors='strict'): - - """ Decodes the object input and returns a tuple (output - object, length consumed). - - input must be an object which provides the bf_getreadbuf - buffer slot. Python strings, buffer objects and memory - mapped files are examples of objects providing this slot. - - errors defines the error handling to apply. It defaults to - 'strict' handling. - - The method may not store state in the Codec instance. Use - StreamCodec for codecs which have to keep state in order to - make encoding/decoding efficient. - - The decoder must be able to handle zero length input and - return an empty object of the output object type in this - situation. - - """ - raise NotImplementedError - -class IncrementalEncoder(object): - """ - An IncrementalEncoder encodes an input in multiple steps. The input can be - passed piece by piece to the encode() method. The IncrementalEncoder remembers - the state of the Encoding process between calls to encode(). - """ - def __init__(self, errors='strict'): - """ - Creates an IncrementalEncoder instance. - - The IncrementalEncoder may use different error handling schemes by - providing the errors keyword argument. See the module docstring - for a list of possible values. - """ - self.errors = errors - self.buffer = "" - - def encode(self, input, final=False): - """ - Encodes input and returns the resulting object. - """ - raise NotImplementedError - - def reset(self): - """ - Resets the encoder to the initial state. - """ - - def getstate(self): - """ - Return the current state of the encoder. - """ - return 0 - - def setstate(self, state): - """ - Set the current state of the encoder. state must have been - returned by getstate(). - """ - -class BufferedIncrementalEncoder(IncrementalEncoder): - """ - This subclass of IncrementalEncoder can be used as the baseclass for an - incremental encoder if the encoder must keep some of the output in a - buffer between calls to encode(). - """ - def __init__(self, errors='strict'): - IncrementalEncoder.__init__(self, errors) - self.buffer = "" # unencoded input that is kept between calls to encode() - - def _buffer_encode(self, input, errors, final): - # Overwrite this method in subclasses: It must encode input - # and return an (output, length consumed) tuple - raise NotImplementedError - - def encode(self, input, final=False): - # encode input (taking the buffer into account) - data = self.buffer + input - (result, consumed) = self._buffer_encode(data, self.errors, final) - # keep unencoded input until the next call - self.buffer = data[consumed:] - return result - - def reset(self): - IncrementalEncoder.reset(self) - self.buffer = "" - - def getstate(self): - return self.buffer or 0 - - def setstate(self, state): - self.buffer = state or "" - -class IncrementalDecoder(object): - """ - An IncrementalDecoder decodes an input in multiple steps. The input can be - passed piece by piece to the decode() method. The IncrementalDecoder - remembers the state of the decoding process between calls to decode(). - """ - def __init__(self, errors='strict'): - """ - Creates a IncrementalDecoder instance. - - The IncrementalDecoder may use different error handling schemes by - providing the errors keyword argument. See the module docstring - for a list of possible values. - """ - self.errors = errors - - def decode(self, input, final=False): - """ - Decodes input and returns the resulting object. 
- """ - raise NotImplementedError - - def reset(self): - """ - Resets the decoder to the initial state. - """ - - def getstate(self): - """ - Return the current state of the decoder. - - This must be a (buffered_input, additional_state_info) tuple. - buffered_input must be a bytes object containing bytes that - were passed to decode() that have not yet been converted. - additional_state_info must be a non-negative integer - representing the state of the decoder WITHOUT yet having - processed the contents of buffered_input. In the initial state - and after reset(), getstate() must return (b"", 0). - """ - return (b"", 0) - - def setstate(self, state): - """ - Set the current state of the decoder. - - state must have been returned by getstate(). The effect of - setstate((b"", 0)) must be equivalent to reset(). - """ - -class BufferedIncrementalDecoder(IncrementalDecoder): - """ - This subclass of IncrementalDecoder can be used as the baseclass for an - incremental decoder if the decoder must be able to handle incomplete byte - sequences. - """ - def __init__(self, errors='strict'): - IncrementalDecoder.__init__(self, errors) - self.buffer = "" # undecoded input that is kept between calls to decode() - - def _buffer_decode(self, input, errors, final): - # Overwrite this method in subclasses: It must decode input - # and return an (output, length consumed) tuple - raise NotImplementedError - - def decode(self, input, final=False): - # decode input (taking the buffer into account) - data = self.buffer + input - (result, consumed) = self._buffer_decode(data, self.errors, final) - # keep undecoded input until the next call - self.buffer = data[consumed:] - return result - - def reset(self): - IncrementalDecoder.reset(self) - self.buffer = "" - - def getstate(self): - # additional state info is always 0 - return (self.buffer, 0) - - def setstate(self, state): - # ignore additional state info - self.buffer = state[0] - -# -# The StreamWriter and StreamReader class provide generic working -# interfaces which can be used to implement new encoding submodules -# very easily. See encodings/utf_8.py for an example on how this is -# done. -# - -class StreamWriter(Codec): - - def __init__(self, stream, errors='strict'): - - """ Creates a StreamWriter instance. - - stream must be a file-like object open for writing - (binary) data. - - The StreamWriter may use different error handling - schemes by providing the errors keyword argument. These - parameters are predefined: - - 'strict' - raise a ValueError (or a subclass) - 'ignore' - ignore the character and continue with the next - 'replace'- replace with a suitable replacement character - 'xmlcharrefreplace' - Replace with the appropriate XML - character reference. - 'backslashreplace' - Replace with backslashed escape - sequences (only for encoding). - - The set of allowed parameter values can be extended via - register_error. - """ - self.stream = stream - self.errors = errors - - def write(self, object): - - """ Writes the object's contents encoded to self.stream. - """ - data, consumed = self.encode(object, self.errors) - self.stream.write(data) - - def writelines(self, list): - - """ Writes the concatenated list of strings to the stream - using .write(). - """ - self.write(''.join(list)) - - def reset(self): - - """ Flushes and resets the codec buffers used for keeping state. 
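BufferedIncrementalDecoder above exists precisely so a multi-byte sequence split across two decode() calls is buffered rather than rejected. A sketch using the UTF-8 incremental decoder (illustrative only):

import codecs

dec = codecs.getincrementaldecoder('utf-8')()
assert dec.decode('\xc3') == u''        # first half of U+00E9 is buffered
assert dec.decode('\xa9') == u'\xe9'    # completed on the next call
dec.reset()                             # drops any buffered bytes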
- - Calling this method should ensure that the data on the - output is put into a clean state, that allows appending - of new fresh data without having to rescan the whole - stream to recover state. - - """ - pass - - def __getattr__(self, name, - getattr=getattr): - - """ Inherit all other methods from the underlying stream. - """ - return getattr(self.stream, name) - - def __enter__(self): - return self - - def __exit__(self, type, value, tb): - self.stream.close() - -### - -class StreamReader(Codec): - - def __init__(self, stream, errors='strict'): - - """ Creates a StreamReader instance. - - stream must be a file-like object open for reading - (binary) data. - - The StreamReader may use different error handling - schemes by providing the errors keyword argument. These - parameters are predefined: - - 'strict' - raise a ValueError (or a subclass) - 'ignore' - ignore the character and continue with the next - 'replace'- replace with a suitable replacement character; - - The set of allowed parameter values can be extended via - register_error. - """ - self.stream = stream - self.errors = errors - self.bytebuffer = "" - # For str->str decoding this will stay a str - # For str->unicode decoding the first read will promote it to unicode - self.charbuffer = "" - self.linebuffer = None - - def decode(self, input, errors='strict'): - raise NotImplementedError - - def read(self, size=-1, chars=-1, firstline=False): - - """ Decodes data from the stream self.stream and returns the - resulting object. - - chars indicates the number of characters to read from the - stream. read() will never return more than chars - characters, but it might return less, if there are not enough - characters available. - - size indicates the approximate maximum number of bytes to - read from the stream for decoding purposes. The decoder - can modify this setting as appropriate. The default value - -1 indicates to read and decode as much as possible. size - is intended to prevent having to decode huge files in one - step. - - If firstline is true, and a UnicodeDecodeError happens - after the first line terminator in the input only the first line - will be returned, the rest of the input will be kept until the - next call to read(). - - The method should use a greedy read strategy meaning that - it should read as much data as is allowed within the - definition of the encoding and the given size, e.g. if - optional encoding endings or state markers are available - on the stream, these should be read too. - """ - # If we have lines cached, first merge them back into characters - if self.linebuffer: - self.charbuffer = "".join(self.linebuffer) - self.linebuffer = None - - # read until we get the required number of characters (if available) - while True: - # can the request can be satisfied from the character buffer? 
- if chars < 0: - if size < 0: - if self.charbuffer: - break - elif len(self.charbuffer) >= size: - break - else: - if len(self.charbuffer) >= chars: - break - # we need more data - if size < 0: - newdata = self.stream.read() - else: - newdata = self.stream.read(size) - # decode bytes (those remaining from the last call included) - data = self.bytebuffer + newdata - try: - newchars, decodedbytes = self.decode(data, self.errors) - except UnicodeDecodeError, exc: - if firstline: - newchars, decodedbytes = self.decode(data[:exc.start], self.errors) - lines = newchars.splitlines(True) - if len(lines)<=1: - raise - else: - raise - # keep undecoded bytes until the next call - self.bytebuffer = data[decodedbytes:] - # put new characters in the character buffer - self.charbuffer += newchars - # there was no data available - if not newdata: - break - if chars < 0: - # Return everything we've got - result = self.charbuffer - self.charbuffer = "" - else: - # Return the first chars characters - result = self.charbuffer[:chars] - self.charbuffer = self.charbuffer[chars:] - return result - - def readline(self, size=None, keepends=True): - - """ Read one line from the input stream and return the - decoded data. - - size, if given, is passed as size argument to the - read() method. - - """ - # If we have lines cached from an earlier read, return - # them unconditionally - if self.linebuffer: - line = self.linebuffer[0] - del self.linebuffer[0] - if len(self.linebuffer) == 1: - # revert to charbuffer mode; we might need more data - # next time - self.charbuffer = self.linebuffer[0] - self.linebuffer = None - if not keepends: - line = line.splitlines(False)[0] - return line - - readsize = size or 72 - line = "" - # If size is given, we call read() only once - while True: - data = self.read(readsize, firstline=True) - if data: - # If we're at a "\r" read one extra character (which might - # be a "\n") to get a proper line ending. If the stream is - # temporarily exhausted we return the wrong line ending. - if data.endswith("\r"): - data += self.read(size=1, chars=1) - - line += data - lines = line.splitlines(True) - if lines: - if len(lines) > 1: - # More than one line result; the first line is a full line - # to return - line = lines[0] - del lines[0] - if len(lines) > 1: - # cache the remaining lines - lines[-1] += self.charbuffer - self.linebuffer = lines - self.charbuffer = None - else: - # only one remaining line, put it back into charbuffer - self.charbuffer = lines[0] + self.charbuffer - if not keepends: - line = line.splitlines(False)[0] - break - line0withend = lines[0] - line0withoutend = lines[0].splitlines(False)[0] - if line0withend != line0withoutend: # We really have a line end - # Put the rest back together and keep it until the next call - self.charbuffer = "".join(lines[1:]) + self.charbuffer - if keepends: - line = line0withend - else: - line = line0withoutend - break - # we didn't get anything or this was our only try - if not data or size is not None: - if line and not keepends: - line = line.splitlines(False)[0] - break - if readsize<8000: - readsize *= 2 - return line - - def readlines(self, sizehint=None, keepends=True): - - """ Read all lines available on the input stream - and return them as list of lines. - - Line breaks are implemented using the codec's decoder - method and are included in the list entries. - - sizehint, if given, is ignored since there is no efficient - way to finding the true end-of-line. 
- - """ - data = self.read() - return data.splitlines(keepends) - - def reset(self): - - """ Resets the codec buffers used for keeping state. - - Note that no stream repositioning should take place. - This method is primarily intended to be able to recover - from decoding errors. - - """ - self.bytebuffer = "" - self.charbuffer = u"" - self.linebuffer = None - - def seek(self, offset, whence=0): - """ Set the input stream's current position. - - Resets the codec buffers used for keeping state. - """ - self.reset() - self.stream.seek(offset, whence) - - def next(self): - - """ Return the next decoded line from the input stream.""" - line = self.readline() - if line: - return line - raise StopIteration - - def __iter__(self): - return self - - def __getattr__(self, name, - getattr=getattr): - - """ Inherit all other methods from the underlying stream. - """ - return getattr(self.stream, name) - - def __enter__(self): - return self - - def __exit__(self, type, value, tb): - self.stream.close() - -### - -class StreamReaderWriter: - - """ StreamReaderWriter instances allow wrapping streams which - work in both read and write modes. - - The design is such that one can use the factory functions - returned by the codec.lookup() function to construct the - instance. - - """ - # Optional attributes set by the file wrappers below - encoding = 'unknown' - - def __init__(self, stream, Reader, Writer, errors='strict'): - - """ Creates a StreamReaderWriter instance. - - stream must be a Stream-like object. - - Reader, Writer must be factory functions or classes - providing the StreamReader, StreamWriter interface resp. - - Error handling is done in the same way as defined for the - StreamWriter/Readers. - - """ - self.stream = stream - self.reader = Reader(stream, errors) - self.writer = Writer(stream, errors) - self.errors = errors - - def read(self, size=-1): - - return self.reader.read(size) - - def readline(self, size=None): - - return self.reader.readline(size) - - def readlines(self, sizehint=None): - - return self.reader.readlines(sizehint) - - def next(self): - - """ Return the next decoded line from the input stream.""" - return self.reader.next() - - def __iter__(self): - return self - - def write(self, data): - - return self.writer.write(data) - - def writelines(self, list): - - return self.writer.writelines(list) - - def reset(self): - - self.reader.reset() - self.writer.reset() - - def __getattr__(self, name, - getattr=getattr): - - """ Inherit all other methods from the underlying stream. - """ - return getattr(self.stream, name) - - # these are needed to make "with codecs.open(...)" work properly - - def __enter__(self): - return self - - def __exit__(self, type, value, tb): - self.stream.close() - -### - -class StreamRecoder: - - """ StreamRecoder instances provide a frontend - backend - view of encoding data. - - They use the complete set of APIs returned by the - codecs.lookup() function to implement their task. - - Data written to the stream is first decoded into an - intermediate format (which is dependent on the given codec - combination) and then written to the stream using an instance - of the provided Writer class. - - In the other direction, data is read from the stream using a - Reader instance and then return encoded data to the caller. 
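The StreamReader machinery above (bytebuffer, charbuffer, linebuffer) is what lets readline() return whole decoded lines from a raw byte stream. A sketch against an in-memory stream with illustrative sample bytes:

import codecs
from StringIO import StringIO

reader = codecs.getreader('utf-8')(StringIO('caf\xc3\xa9\nbar\n'))
assert reader.readline() == u'caf\xe9\n'   # decoded, line ending kept
assert reader.read() == u'bar\n'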
- - """ - # Optional attributes set by the file wrappers below - data_encoding = 'unknown' - file_encoding = 'unknown' - - def __init__(self, stream, encode, decode, Reader, Writer, - errors='strict'): - - """ Creates a StreamRecoder instance which implements a two-way - conversion: encode and decode work on the frontend (the - input to .read() and output of .write()) while - Reader and Writer work on the backend (reading and - writing to the stream). - - You can use these objects to do transparent direct - recodings from e.g. latin-1 to utf-8 and back. - - stream must be a file-like object. - - encode, decode must adhere to the Codec interface, Reader, - Writer must be factory functions or classes providing the - StreamReader, StreamWriter interface resp. - - encode and decode are needed for the frontend translation, - Reader and Writer for the backend translation. Unicode is - used as intermediate encoding. - - Error handling is done in the same way as defined for the - StreamWriter/Readers. - - """ - self.stream = stream - self.encode = encode - self.decode = decode - self.reader = Reader(stream, errors) - self.writer = Writer(stream, errors) - self.errors = errors - - def read(self, size=-1): - - data = self.reader.read(size) - data, bytesencoded = self.encode(data, self.errors) - return data - - def readline(self, size=None): - - if size is None: - data = self.reader.readline() - else: - data = self.reader.readline(size) - data, bytesencoded = self.encode(data, self.errors) - return data - - def readlines(self, sizehint=None): - - data = self.reader.read() - data, bytesencoded = self.encode(data, self.errors) - return data.splitlines(1) - - def next(self): - - """ Return the next decoded line from the input stream.""" - data = self.reader.next() - data, bytesencoded = self.encode(data, self.errors) - return data - - def __iter__(self): - return self - - def write(self, data): - - data, bytesdecoded = self.decode(data, self.errors) - return self.writer.write(data) - - def writelines(self, list): - - data = ''.join(list) - data, bytesdecoded = self.decode(data, self.errors) - return self.writer.write(data) - - def reset(self): - - self.reader.reset() - self.writer.reset() - - def __getattr__(self, name, - getattr=getattr): - - """ Inherit all other methods from the underlying stream. - """ - return getattr(self.stream, name) - - def __enter__(self): - return self - - def __exit__(self, type, value, tb): - self.stream.close() - -### Shortcuts - -def open(filename, mode='rb', encoding=None, errors='strict', buffering=1): - - """ Open an encoded file using the given mode and return - a wrapped version providing transparent encoding/decoding. - - Note: The wrapped version will only accept the object format - defined by the codecs, i.e. Unicode objects for most builtin - codecs. Output is also codec dependent and will usually be - Unicode as well. - - Files are always opened in binary mode, even if no binary mode - was specified. This is done to avoid data loss due to encodings - using 8-bit values. The default file mode is 'rb' meaning to - open the file in binary read mode. - - encoding specifies the encoding which is to be used for the - file. - - errors may be given to define the error handling. It defaults - to 'strict' which causes ValueErrors to be raised in case an - encoding error occurs. - - buffering has the same meaning as for the builtin open() API. - It defaults to line buffered. 
- - The returned wrapped file object provides an extra attribute - .encoding which allows querying the used encoding. This - attribute is only available if an encoding was specified as - parameter. - - """ - if encoding is not None and \ - 'b' not in mode: - # Force opening of the file in binary mode - mode = mode + 'b' - file = __builtin__.open(filename, mode, buffering) - if encoding is None: - return file - info = lookup(encoding) - srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors) - # Add attributes to simplify introspection - srw.encoding = encoding - return srw - -def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'): - - """ Return a wrapped version of file which provides transparent - encoding translation. - - Strings written to the wrapped file are interpreted according - to the given data_encoding and then written to the original - file as string using file_encoding. The intermediate encoding - will usually be Unicode but depends on the specified codecs. - - Strings are read from the file using file_encoding and then - passed back to the caller as string using data_encoding. - - If file_encoding is not given, it defaults to data_encoding. - - errors may be given to define the error handling. It defaults - to 'strict' which causes ValueErrors to be raised in case an - encoding error occurs. - - The returned wrapped file object provides two extra attributes - .data_encoding and .file_encoding which reflect the given - parameters of the same name. The attributes can be used for - introspection by Python programs. - - """ - if file_encoding is None: - file_encoding = data_encoding - data_info = lookup(data_encoding) - file_info = lookup(file_encoding) - sr = StreamRecoder(file, data_info.encode, data_info.decode, - file_info.streamreader, file_info.streamwriter, errors) - # Add attributes to simplify introspection - sr.data_encoding = data_encoding - sr.file_encoding = file_encoding - return sr - -### Helpers for codec lookup - -def getencoder(encoding): - - """ Lookup up the codec for the given encoding and return - its encoder function. - - Raises a LookupError in case the encoding cannot be found. - - """ - return lookup(encoding).encode - -def getdecoder(encoding): - - """ Lookup up the codec for the given encoding and return - its decoder function. - - Raises a LookupError in case the encoding cannot be found. - - """ - return lookup(encoding).decode - -def getincrementalencoder(encoding): - - """ Lookup up the codec for the given encoding and return - its IncrementalEncoder class or factory function. - - Raises a LookupError in case the encoding cannot be found - or the codecs doesn't provide an incremental encoder. - - """ - encoder = lookup(encoding).incrementalencoder - if encoder is None: - raise LookupError(encoding) - return encoder - -def getincrementaldecoder(encoding): - - """ Lookup up the codec for the given encoding and return - its IncrementalDecoder class or factory function. - - Raises a LookupError in case the encoding cannot be found - or the codecs doesn't provide an incremental decoder. - - """ - decoder = lookup(encoding).incrementaldecoder - if decoder is None: - raise LookupError(encoding) - return decoder - -def getreader(encoding): - - """ Lookup up the codec for the given encoding and return - its StreamReader class or factory function. - - Raises a LookupError in case the encoding cannot be found. 
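codecs.open(), as documented above, forces binary mode and hands back a StreamReaderWriter that transcodes on the way through. A round-trip sketch (the /tmp/codecs_demo.txt path is hypothetical, chosen only for illustration):

import codecs

f = codecs.open('/tmp/codecs_demo.txt', 'w', encoding='utf-8')
f.write(u'caf\xe9\n')                  # unicode in, UTF-8 bytes on disk
f.close()

f = codecs.open('/tmp/codecs_demo.txt', 'r', encoding='utf-8')
assert f.read() == u'caf\xe9\n'
f.close()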
- - """ - return lookup(encoding).streamreader - -def getwriter(encoding): - - """ Lookup up the codec for the given encoding and return - its StreamWriter class or factory function. - - Raises a LookupError in case the encoding cannot be found. - - """ - return lookup(encoding).streamwriter - -def iterencode(iterator, encoding, errors='strict', **kwargs): - """ - Encoding iterator. - - Encodes the input strings from the iterator using a IncrementalEncoder. - - errors and kwargs are passed through to the IncrementalEncoder - constructor. - """ - encoder = getincrementalencoder(encoding)(errors, **kwargs) - for input in iterator: - output = encoder.encode(input) - if output: - yield output - output = encoder.encode("", True) - if output: - yield output - -def iterdecode(iterator, encoding, errors='strict', **kwargs): - """ - Decoding iterator. - - Decodes the input strings from the iterator using a IncrementalDecoder. - - errors and kwargs are passed through to the IncrementalDecoder - constructor. - """ - decoder = getincrementaldecoder(encoding)(errors, **kwargs) - for input in iterator: - output = decoder.decode(input) - if output: - yield output - output = decoder.decode("", True) - if output: - yield output - -### Helpers for charmap-based codecs - -def make_identity_dict(rng): - - """ make_identity_dict(rng) -> dict - - Return a dictionary where elements of the rng sequence are - mapped to themselves. - - """ - res = {} - for i in rng: - res[i]=i - return res - -def make_encoding_map(decoding_map): - - """ Creates an encoding map from a decoding map. - - If a target mapping in the decoding map occurs multiple - times, then that target is mapped to None (undefined mapping), - causing an exception when encountered by the charmap codec - during translation. - - One example where this happens is cp875.py which decodes - multiple character to \u001a. - - """ - m = {} - for k,v in decoding_map.items(): - if not v in m: - m[v] = k - else: - m[v] = None - return m - -### error handlers - -try: - strict_errors = lookup_error("strict") - ignore_errors = lookup_error("ignore") - replace_errors = lookup_error("replace") - xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace") - backslashreplace_errors = lookup_error("backslashreplace") -except LookupError: - # In --disable-unicode builds, these error handler are missing - strict_errors = None - ignore_errors = None - replace_errors = None - xmlcharrefreplace_errors = None - backslashreplace_errors = None - -# Tell modulefinder that using codecs probably needs the encodings -# package -_false = 0 -if _false: - import encodings - -### Tests - -if __name__ == '__main__': - - # Make stdout translate Latin-1 output into UTF-8 output - sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8') - - # Have stdin translate Latin-1 input into UTF-8 input - sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1') diff --git a/icarus-miner/data/usr/lib/python2.6/codecs.pyc b/icarus-miner/data/usr/lib/python2.6/codecs.pyc deleted file mode 100644 index c7f8375..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/codecs.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/collections.py b/icarus-miner/data/usr/lib/python2.6/collections.py deleted file mode 100644 index b81effe..0000000 --- a/icarus-miner/data/usr/lib/python2.6/collections.py +++ /dev/null @@ -1,149 +0,0 @@ -__all__ = ['deque', 'defaultdict', 'namedtuple'] -# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py. 
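iterencode()/iterdecode() above simply drive the incremental codecs over any iterable of chunks. A round-trip sketch with illustrative sample chunks:

import codecs

chunks = [u'caf\xe9', u' au lait']
encoded = list(codecs.iterencode(chunks, 'utf-8'))
assert ''.join(encoded) == 'caf\xc3\xa9 au lait'
assert u''.join(codecs.iterdecode(encoded, 'utf-8')) == u'caf\xe9 au lait'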
-# They should however be considered an integral part of collections.py. -from _abcoll import * -import _abcoll -__all__ += _abcoll.__all__ - -from _collections import deque, defaultdict -from operator import itemgetter as _itemgetter -from keyword import iskeyword as _iskeyword -import sys as _sys - -def namedtuple(typename, field_names, verbose=False): - """Returns a new subclass of tuple with named fields. - - >>> Point = namedtuple('Point', 'x y') - >>> Point.__doc__ # docstring for the new class - 'Point(x, y)' - >>> p = Point(11, y=22) # instantiate with positional args or keywords - >>> p[0] + p[1] # indexable like a plain tuple - 33 - >>> x, y = p # unpack like a regular tuple - >>> x, y - (11, 22) - >>> p.x + p.y # fields also accessible by name - 33 - >>> d = p._asdict() # convert to a dictionary - >>> d['x'] - 11 - >>> Point(**d) # convert from a dictionary - Point(x=11, y=22) - >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields - Point(x=100, y=22) - - """ - - # Parse and validate the field names. Validation serves two purposes, - # generating informative error messages and preventing template injection attacks. - if isinstance(field_names, basestring): - field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas - field_names = tuple(map(str, field_names)) - for name in (typename,) + field_names: - if not all(c.isalnum() or c=='_' for c in name): - raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name) - if _iskeyword(name): - raise ValueError('Type names and field names cannot be a keyword: %r' % name) - if name[0].isdigit(): - raise ValueError('Type names and field names cannot start with a number: %r' % name) - seen_names = set() - for name in field_names: - if name.startswith('_'): - raise ValueError('Field names cannot start with an underscore: %r' % name) - if name in seen_names: - raise ValueError('Encountered duplicate field name: %r' % name) - seen_names.add(name) - - # Create and fill-in the class template - numfields = len(field_names) - argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes - reprtxt = ', '.join('%s=%%r' % name for name in field_names) - dicttxt = ', '.join('%r: t[%d]' % (name, pos) for pos, name in enumerate(field_names)) - template = '''class %(typename)s(tuple): - '%(typename)s(%(argtxt)s)' \n - __slots__ = () \n - _fields = %(field_names)r \n - def __new__(_cls, %(argtxt)s): - return _tuple.__new__(_cls, (%(argtxt)s)) \n - @classmethod - def _make(cls, iterable, new=tuple.__new__, len=len): - 'Make a new %(typename)s object from a sequence or iterable' - result = new(cls, iterable) - if len(result) != %(numfields)d: - raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result)) - return result \n - def __repr__(self): - return '%(typename)s(%(reprtxt)s)' %% self \n - def _asdict(t): - 'Return a new dict which maps field names to their values' - return {%(dicttxt)s} \n - def _replace(_self, **kwds): - 'Return a new %(typename)s object replacing specified fields with new values' - result = _self._make(map(kwds.pop, %(field_names)r, _self)) - if kwds: - raise ValueError('Got unexpected field names: %%r' %% kwds.keys()) - return result \n - def __getnewargs__(self): - return tuple(self) \n\n''' % locals() - for i, name in enumerate(field_names): - template += ' %s = _property(_itemgetter(%d))\n' % (name, i) - if verbose: - print template - - # Execute the template string in
a temporary namespace and - # support tracing utilities by setting a value for frame.f_globals['__name__'] - namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename, - _property=property, _tuple=tuple) - try: - exec template in namespace - except SyntaxError, e: - raise SyntaxError(e.message + ':\n' + template) - result = namespace[typename] - - # For pickling to work, the __module__ variable needs to be set to the frame - # where the named tuple is created. Bypass this step in environments where - # sys._getframe is not defined (Jython for example). - if hasattr(_sys, '_getframe'): - result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__') - - return result - - - - - - -if __name__ == '__main__': - # verify that instances can be pickled - from cPickle import loads, dumps - Point = namedtuple('Point', 'x, y', True) - p = Point(x=10, y=20) - assert p == loads(dumps(p)) - - # test and demonstrate ability to override methods - class Point(namedtuple('Point', 'x y')): - __slots__ = () - @property - def hypot(self): - return (self.x ** 2 + self.y ** 2) ** 0.5 - def __str__(self): - return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot) - - for p in Point(3, 4), Point(14, 5/7.): - print p - - class Point(namedtuple('Point', 'x y')): - 'Point class with optimized _make() and _replace() without error-checking' - __slots__ = () - _make = classmethod(tuple.__new__) - def _replace(self, _map=map, **kwds): - return self._make(_map(kwds.get, ('x', 'y'), self)) - - print Point(11, 22)._replace(x=100) - - Point3D = namedtuple('Point3D', Point._fields + ('z',)) - print Point3D.__doc__ - - import doctest - TestResults = namedtuple('TestResults', 'failed attempted') - print TestResults(*doctest.testmod()) diff --git a/icarus-miner/data/usr/lib/python2.6/collections.pyc b/icarus-miner/data/usr/lib/python2.6/collections.pyc deleted file mode 100644 index 25973c7..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/collections.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/copy_reg.py b/icarus-miner/data/usr/lib/python2.6/copy_reg.py deleted file mode 100644 index db17150..0000000 --- a/icarus-miner/data/usr/lib/python2.6/copy_reg.py +++ /dev/null @@ -1,201 +0,0 @@ -"""Helper to provide extensibility for pickle/cPickle. - -This is only useful to add pickle support for extension types defined in -C, not for instances of user-defined classes. -""" - -from types import ClassType as _ClassType - -__all__ = ["pickle", "constructor", - "add_extension", "remove_extension", "clear_extension_cache"] - -dispatch_table = {} - -def pickle(ob_type, pickle_function, constructor_ob=None): - if type(ob_type) is _ClassType: - raise TypeError("copy_reg is not intended for use with classes") - - if not hasattr(pickle_function, '__call__'): - raise TypeError("reduction functions must be callable") - dispatch_table[ob_type] = pickle_function - - # The constructor_ob function is a vestige of safe for unpickling. - # There is no reason for the caller to pass it anymore. - if constructor_ob is not None: - constructor(constructor_ob) - -def constructor(object): - if not hasattr(object, '__call__'): - raise TypeError("constructors must be callable") - -# Example: provide pickling support for complex numbers.
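The namedtuple() factory from the collections.py file deleted above is exercised as follows. A minimal sketch mirroring the Point example from its own docstring:

from collections import namedtuple

Point = namedtuple("Point", "x y")
p = Point(11, y=22)                    # positional or keyword construction
assert p.x + p.y == 33                 # fields readable as attributes
assert p == (11, 22)                   # still a plain tuple underneath
assert p._replace(x=100) == (100, 22)  # functional update via _replace()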
- -try: - complex -except NameError: - pass -else: - - def pickle_complex(c): - return complex, (c.real, c.imag) - - pickle(complex, pickle_complex, complex) - -# Support for pickling new-style objects - -def _reconstructor(cls, base, state): - if base is object: - obj = object.__new__(cls) - else: - obj = base.__new__(cls, state) - if base.__init__ != object.__init__: - base.__init__(obj, state) - return obj - -_HEAPTYPE = 1<<9 - -# Python code for object.__reduce_ex__ for protocols 0 and 1 - -def _reduce_ex(self, proto): - assert proto < 2 - for base in self.__class__.__mro__: - if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE: - break - else: - base = object # not really reachable - if base is object: - state = None - else: - if base is self.__class__: - raise TypeError, "can't pickle %s objects" % base.__name__ - state = base(self) - args = (self.__class__, base, state) - try: - getstate = self.__getstate__ - except AttributeError: - if getattr(self, "__slots__", None): - raise TypeError("a class that defines __slots__ without " - "defining __getstate__ cannot be pickled") - try: - dict = self.__dict__ - except AttributeError: - dict = None - else: - dict = getstate() - if dict: - return _reconstructor, args, dict - else: - return _reconstructor, args - -# Helper for __reduce_ex__ protocol 2 - -def __newobj__(cls, *args): - return cls.__new__(cls, *args) - -def _slotnames(cls): - """Return a list of slot names for a given class. - - This needs to find slots defined by the class and its bases, so we - can't simply return the __slots__ attribute. We must walk down - the Method Resolution Order and concatenate the __slots__ of each - class found there. (This assumes classes don't modify their - __slots__ attribute to misrepresent their slots after the class is - defined.) - """ - - # Get the value from a cache in the class if possible - names = cls.__dict__.get("__slotnames__") - if names is not None: - return names - - # Not cached -- calculate the value - names = [] - if not hasattr(cls, "__slots__"): - # This class has no slots - pass - else: - # Slots found -- gather slot names from all base classes - for c in cls.__mro__: - if "__slots__" in c.__dict__: - slots = c.__dict__['__slots__'] - # if class has a single slot, it can be given as a string - if isinstance(slots, basestring): - slots = (slots,) - for name in slots: - # special descriptors - if name in ("__dict__", "__weakref__"): - continue - # mangled names - elif name.startswith('__') and not name.endswith('__'): - names.append('_%s%s' % (c.__name__, name)) - else: - names.append(name) - - # Cache the outcome in the class if at all possible - try: - cls.__slotnames__ = names - except: - pass # But don't die if we can't - - return names - -# A registry of extension codes. This is an ad-hoc compression -# mechanism. Whenever a global reference to <module>, <name> is about -# to be pickled, the (<module>, <name>) tuple is looked up here to see -# if it is a registered extension code for it. Extension codes are -# universal, so that the meaning of a pickle does not depend on -# context. (There are also some codes reserved for local use that -# don't have this restriction.) Codes are positive ints; 0 is -# reserved. - -_extension_registry = {} # key -> code -_inverted_registry = {} # code -> key -_extension_cache = {} # code -> object -# Don't ever rebind those names: cPickle grabs a reference to them when -# it's initialized, and won't see a rebinding.
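The dispatch_table populated by pickle() above is consulted by pickle/cPickle when serializing an instance. A minimal sketch; the Frequency class is purely illustrative (copy_reg is really aimed at extension types, which cannot define __reduce__ in Python):

import copy_reg, pickle

class Frequency(object):
    def __init__(self, hz):
        self.hz = hz

def pickle_frequency(f):
    # Return (callable, args) so that unpickling calls Frequency(hz) again.
    return Frequency, (f.hz,)

copy_reg.pickle(Frequency, pickle_frequency)
restored = pickle.loads(pickle.dumps(Frequency(32000000)))
assert restored.hz == 32000000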
- -def add_extension(module, name, code): - """Register an extension code.""" - code = int(code) - if not 1 <= code <= 0x7fffffff: - raise ValueError, "code out of range" - key = (module, name) - if (_extension_registry.get(key) == code and - _inverted_registry.get(code) == key): - return # Redundant registrations are benign - if key in _extension_registry: - raise ValueError("key %s is already registered with code %s" % - (key, _extension_registry[key])) - if code in _inverted_registry: - raise ValueError("code %s is already in use for key %s" % - (code, _inverted_registry[code])) - _extension_registry[key] = code - _inverted_registry[code] = key - -def remove_extension(module, name, code): - """Unregister an extension code. For testing only.""" - key = (module, name) - if (_extension_registry.get(key) != code or - _inverted_registry.get(code) != key): - raise ValueError("key %s is not registered with code %s" % - (key, code)) - del _extension_registry[key] - del _inverted_registry[code] - if code in _extension_cache: - del _extension_cache[code] - -def clear_extension_cache(): - _extension_cache.clear() - -# Standard extension code assignments - -# Reserved ranges - -# First Last Count Purpose -# 1 127 127 Reserved for Python standard library -# 128 191 64 Reserved for Zope -# 192 239 48 Reserved for 3rd parties -# 240 255 16 Reserved for private use (will never be assigned) -# 256 Inf Inf Reserved for future assignment - -# Extension codes are assigned by the Python Software Foundation. diff --git a/icarus-miner/data/usr/lib/python2.6/copy_reg.pyc b/icarus-miner/data/usr/lib/python2.6/copy_reg.pyc deleted file mode 100644 index 058a0a8..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/copy_reg.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/encodings/__init__.py b/icarus-miner/data/usr/lib/python2.6/encodings/__init__.py deleted file mode 100644 index aea7c5e..0000000 --- a/icarus-miner/data/usr/lib/python2.6/encodings/__init__.py +++ /dev/null @@ -1,157 +0,0 @@ -""" Standard "encodings" Package - - Standard Python encoding modules are stored in this package - directory. - - Codec modules must have names corresponding to normalized encoding - names as defined in the normalize_encoding() function below, e.g. - 'utf-8' must be implemented by the module 'utf_8.py'. - - Each codec module must export the following interface: - - * getregentry() -> codecs.CodecInfo object - The getregentry() API must return a CodecInfo object with encoder, decoder, - incrementalencoder, incrementaldecoder, streamwriter and streamreader - attributes which adhere to the Python Codec Interface Standard. - - In addition, a module may optionally also define the following - APIs which are then used by the package's codec search function: - - * getaliases() -> sequence of encoding name strings to use as aliases - - Alias names returned by getaliases() must be normalized encoding - names as defined by normalize_encoding(). - -Written by Marc-Andre Lemburg (mal@lemburg.com). - -(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. - -"""#" - -import codecs -from encodings import aliases -import __builtin__ - -_cache = {} -_unknown = '--unknown--' -_import_tail = ['*'] -_norm_encoding_map = (' . ' - '0123456789 ABCDEFGHIJKLMNOPQRSTUVWXYZ ' - ' abcdefghijklmnopqrstuvwxyz ' - ' ' - ' ' - ' ') -_aliases = aliases.aliases - -class CodecRegistryError(LookupError, SystemError): - pass - -def normalize_encoding(encoding): - - """ Normalize an encoding name.
- - Normalization works as follows: all non-alphanumeric - characters except the dot used for Python package names are - collapsed and replaced with a single underscore, e.g. ' -;#' - becomes '_'. Leading and trailing underscores are removed. - - Note that encoding names should be ASCII only; if they do use - non-ASCII characters, these must be Latin-1 compatible. - - """ - # Make sure we have an 8-bit string, because .translate() works - # differently for Unicode strings. - if hasattr(__builtin__, "unicode") and isinstance(encoding, unicode): - # Note that .encode('latin-1') does *not* use the codec - # registry, so this call doesn't recurse. (See unicodeobject.c - # PyUnicode_AsEncodedString() for details) - encoding = encoding.encode('latin-1') - return '_'.join(encoding.translate(_norm_encoding_map).split()) - -def search_function(encoding): - - # Cache lookup - entry = _cache.get(encoding, _unknown) - if entry is not _unknown: - return entry - - # Import the module: - # - # First try to find an alias for the normalized encoding - # name and lookup the module using the aliased name, then try to - # lookup the module using the standard import scheme, i.e. first - # try in the encodings package, then at top-level. - # - norm_encoding = normalize_encoding(encoding) - aliased_encoding = _aliases.get(norm_encoding) or \ - _aliases.get(norm_encoding.replace('.', '_')) - if aliased_encoding is not None: - modnames = [aliased_encoding, - norm_encoding] - else: - modnames = [norm_encoding] - for modname in modnames: - if not modname or '.' in modname: - continue - try: - # Import is absolute to prevent the possibly malicious import of a - # module with side-effects that is not in the 'encodings' package. - mod = __import__('encodings.' + modname, fromlist=_import_tail, - level=0) - except ImportError: - pass - else: - break - else: - mod = None - - try: - getregentry = mod.getregentry - except AttributeError: - # Not a codec module - mod = None - - if mod is None: - # Cache misses - _cache[encoding] = None - return None - - # Now ask the module for the registry entry - entry = getregentry() - if not isinstance(entry, codecs.CodecInfo): - if not 4 <= len(entry) <= 7: - raise CodecRegistryError,\ - 'module "%s" (%s) failed to register' % \ - (mod.__name__, mod.__file__) - if not callable(entry[0]) or \ - not callable(entry[1]) or \ - (entry[2] is not None and not callable(entry[2])) or \ - (entry[3] is not None and not callable(entry[3])) or \ - (len(entry) > 4 and entry[4] is not None and not callable(entry[4])) or \ - (len(entry) > 5 and entry[5] is not None and not callable(entry[5])): - raise CodecRegistryError,\ - 'incompatible codecs in module "%s" (%s)' % \ - (mod.__name__, mod.__file__) - if len(entry)<7 or entry[6] is None: - entry += (None,)*(6-len(entry)) + (mod.__name__.split(".", 1)[1],) - entry = codecs.CodecInfo(*entry) - - # Cache the codec registry entry - _cache[encoding] = entry - - # Register its aliases (without overwriting previously registered - # aliases) - try: - codecaliases = mod.getaliases() - except AttributeError: - pass - else: - for alias in codecaliases: - if not _aliases.has_key(alias): - _aliases[alias] = modname - - # Return the registry entry - return entry - -# Register the search_function in the Python codec registry -codecs.register(search_function) diff --git a/icarus-miner/data/usr/lib/python2.6/encodings/__init__.pyc b/icarus-miner/data/usr/lib/python2.6/encodings/__init__.pyc deleted file mode 100644 index afc8b65..0000000 Binary files 
a/icarus-miner/data/usr/lib/python2.6/encodings/__init__.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/encodings/aliases.py b/icarus-miner/data/usr/lib/python2.6/encodings/aliases.py deleted file mode 100644 index c6f5aeb..0000000 --- a/icarus-miner/data/usr/lib/python2.6/encodings/aliases.py +++ /dev/null @@ -1,522 +0,0 @@ -""" Encoding Aliases Support - - This module is used by the encodings package search function to - map encodings names to module names. - - Note that the search function normalizes the encoding names before - doing the lookup, so the mapping will have to map normalized - encoding names to module names. - - Contents: - - The following aliases dictionary contains mappings of all IANA - character set names for which the Python core library provides - codecs. In addition to these, a few Python specific codec - aliases have also been added. - -""" -aliases = { - - # Please keep this list sorted alphabetically by value ! - - # ascii codec - '646' : 'ascii', - 'ansi_x3.4_1968' : 'ascii', - 'ansi_x3_4_1968' : 'ascii', # some email headers use this non-standard name - 'ansi_x3.4_1986' : 'ascii', - 'cp367' : 'ascii', - 'csascii' : 'ascii', - 'ibm367' : 'ascii', - 'iso646_us' : 'ascii', - 'iso_646.irv_1991' : 'ascii', - 'iso_ir_6' : 'ascii', - 'us' : 'ascii', - 'us_ascii' : 'ascii', - - # base64_codec codec - 'base64' : 'base64_codec', - 'base_64' : 'base64_codec', - - # big5 codec - 'big5_tw' : 'big5', - 'csbig5' : 'big5', - - # big5hkscs codec - 'big5_hkscs' : 'big5hkscs', - 'hkscs' : 'big5hkscs', - - # bz2_codec codec - 'bz2' : 'bz2_codec', - - # cp037 codec - '037' : 'cp037', - 'csibm037' : 'cp037', - 'ebcdic_cp_ca' : 'cp037', - 'ebcdic_cp_nl' : 'cp037', - 'ebcdic_cp_us' : 'cp037', - 'ebcdic_cp_wt' : 'cp037', - 'ibm037' : 'cp037', - 'ibm039' : 'cp037', - - # cp1026 codec - '1026' : 'cp1026', - 'csibm1026' : 'cp1026', - 'ibm1026' : 'cp1026', - - # cp1140 codec - '1140' : 'cp1140', - 'ibm1140' : 'cp1140', - - # cp1250 codec - '1250' : 'cp1250', - 'windows_1250' : 'cp1250', - - # cp1251 codec - '1251' : 'cp1251', - 'windows_1251' : 'cp1251', - - # cp1252 codec - '1252' : 'cp1252', - 'windows_1252' : 'cp1252', - - # cp1253 codec - '1253' : 'cp1253', - 'windows_1253' : 'cp1253', - - # cp1254 codec - '1254' : 'cp1254', - 'windows_1254' : 'cp1254', - - # cp1255 codec - '1255' : 'cp1255', - 'windows_1255' : 'cp1255', - - # cp1256 codec - '1256' : 'cp1256', - 'windows_1256' : 'cp1256', - - # cp1257 codec - '1257' : 'cp1257', - 'windows_1257' : 'cp1257', - - # cp1258 codec - '1258' : 'cp1258', - 'windows_1258' : 'cp1258', - - # cp424 codec - '424' : 'cp424', - 'csibm424' : 'cp424', - 'ebcdic_cp_he' : 'cp424', - 'ibm424' : 'cp424', - - # cp437 codec - '437' : 'cp437', - 'cspc8codepage437' : 'cp437', - 'ibm437' : 'cp437', - - # cp500 codec - '500' : 'cp500', - 'csibm500' : 'cp500', - 'ebcdic_cp_be' : 'cp500', - 'ebcdic_cp_ch' : 'cp500', - 'ibm500' : 'cp500', - - # cp775 codec - '775' : 'cp775', - 'cspc775baltic' : 'cp775', - 'ibm775' : 'cp775', - - # cp850 codec - '850' : 'cp850', - 'cspc850multilingual' : 'cp850', - 'ibm850' : 'cp850', - - # cp852 codec - '852' : 'cp852', - 'cspcp852' : 'cp852', - 'ibm852' : 'cp852', - - # cp855 codec - '855' : 'cp855', - 'csibm855' : 'cp855', - 'ibm855' : 'cp855', - - # cp857 codec - '857' : 'cp857', - 'csibm857' : 'cp857', - 'ibm857' : 'cp857', - - # cp860 codec - '860' : 'cp860', - 'csibm860' : 'cp860', - 'ibm860' : 'cp860', - - # cp861 codec - '861' : 'cp861', - 'cp_is' : 'cp861', - 'csibm861' : 'cp861', - 'ibm861' : 
'cp861', - - # cp862 codec - '862' : 'cp862', - 'cspc862latinhebrew' : 'cp862', - 'ibm862' : 'cp862', - - # cp863 codec - '863' : 'cp863', - 'csibm863' : 'cp863', - 'ibm863' : 'cp863', - - # cp864 codec - '864' : 'cp864', - 'csibm864' : 'cp864', - 'ibm864' : 'cp864', - - # cp865 codec - '865' : 'cp865', - 'csibm865' : 'cp865', - 'ibm865' : 'cp865', - - # cp866 codec - '866' : 'cp866', - 'csibm866' : 'cp866', - 'ibm866' : 'cp866', - - # cp869 codec - '869' : 'cp869', - 'cp_gr' : 'cp869', - 'csibm869' : 'cp869', - 'ibm869' : 'cp869', - - # cp932 codec - '932' : 'cp932', - 'ms932' : 'cp932', - 'mskanji' : 'cp932', - 'ms_kanji' : 'cp932', - - # cp949 codec - '949' : 'cp949', - 'ms949' : 'cp949', - 'uhc' : 'cp949', - - # cp950 codec - '950' : 'cp950', - 'ms950' : 'cp950', - - # euc_jis_2004 codec - 'jisx0213' : 'euc_jis_2004', - 'eucjis2004' : 'euc_jis_2004', - 'euc_jis2004' : 'euc_jis_2004', - - # euc_jisx0213 codec - 'eucjisx0213' : 'euc_jisx0213', - - # euc_jp codec - 'eucjp' : 'euc_jp', - 'ujis' : 'euc_jp', - 'u_jis' : 'euc_jp', - - # euc_kr codec - 'euckr' : 'euc_kr', - 'korean' : 'euc_kr', - 'ksc5601' : 'euc_kr', - 'ks_c_5601' : 'euc_kr', - 'ks_c_5601_1987' : 'euc_kr', - 'ksx1001' : 'euc_kr', - 'ks_x_1001' : 'euc_kr', - - # gb18030 codec - 'gb18030_2000' : 'gb18030', - - # gb2312 codec - 'chinese' : 'gb2312', - 'csiso58gb231280' : 'gb2312', - 'euc_cn' : 'gb2312', - 'euccn' : 'gb2312', - 'eucgb2312_cn' : 'gb2312', - 'gb2312_1980' : 'gb2312', - 'gb2312_80' : 'gb2312', - 'iso_ir_58' : 'gb2312', - - # gbk codec - '936' : 'gbk', - 'cp936' : 'gbk', - 'ms936' : 'gbk', - - # hex_codec codec - 'hex' : 'hex_codec', - - # hp_roman8 codec - 'roman8' : 'hp_roman8', - 'r8' : 'hp_roman8', - 'csHPRoman8' : 'hp_roman8', - - # hz codec - 'hzgb' : 'hz', - 'hz_gb' : 'hz', - 'hz_gb_2312' : 'hz', - - # iso2022_jp codec - 'csiso2022jp' : 'iso2022_jp', - 'iso2022jp' : 'iso2022_jp', - 'iso_2022_jp' : 'iso2022_jp', - - # iso2022_jp_1 codec - 'iso2022jp_1' : 'iso2022_jp_1', - 'iso_2022_jp_1' : 'iso2022_jp_1', - - # iso2022_jp_2 codec - 'iso2022jp_2' : 'iso2022_jp_2', - 'iso_2022_jp_2' : 'iso2022_jp_2', - - # iso2022_jp_2004 codec - 'iso_2022_jp_2004' : 'iso2022_jp_2004', - 'iso2022jp_2004' : 'iso2022_jp_2004', - - # iso2022_jp_3 codec - 'iso2022jp_3' : 'iso2022_jp_3', - 'iso_2022_jp_3' : 'iso2022_jp_3', - - # iso2022_jp_ext codec - 'iso2022jp_ext' : 'iso2022_jp_ext', - 'iso_2022_jp_ext' : 'iso2022_jp_ext', - - # iso2022_kr codec - 'csiso2022kr' : 'iso2022_kr', - 'iso2022kr' : 'iso2022_kr', - 'iso_2022_kr' : 'iso2022_kr', - - # iso8859_10 codec - 'csisolatin6' : 'iso8859_10', - 'iso_8859_10' : 'iso8859_10', - 'iso_8859_10_1992' : 'iso8859_10', - 'iso_ir_157' : 'iso8859_10', - 'l6' : 'iso8859_10', - 'latin6' : 'iso8859_10', - - # iso8859_11 codec - 'thai' : 'iso8859_11', - 'iso_8859_11' : 'iso8859_11', - 'iso_8859_11_2001' : 'iso8859_11', - - # iso8859_13 codec - 'iso_8859_13' : 'iso8859_13', - 'l7' : 'iso8859_13', - 'latin7' : 'iso8859_13', - - # iso8859_14 codec - 'iso_8859_14' : 'iso8859_14', - 'iso_8859_14_1998' : 'iso8859_14', - 'iso_celtic' : 'iso8859_14', - 'iso_ir_199' : 'iso8859_14', - 'l8' : 'iso8859_14', - 'latin8' : 'iso8859_14', - - # iso8859_15 codec - 'iso_8859_15' : 'iso8859_15', - 'l9' : 'iso8859_15', - 'latin9' : 'iso8859_15', - - # iso8859_16 codec - 'iso_8859_16' : 'iso8859_16', - 'iso_8859_16_2001' : 'iso8859_16', - 'iso_ir_226' : 'iso8859_16', - 'l10' : 'iso8859_16', - 'latin10' : 'iso8859_16', - - # iso8859_2 codec - 'csisolatin2' : 'iso8859_2', - 'iso_8859_2' : 'iso8859_2', - 'iso_8859_2_1987' 
: 'iso8859_2', - 'iso_ir_101' : 'iso8859_2', - 'l2' : 'iso8859_2', - 'latin2' : 'iso8859_2', - - # iso8859_3 codec - 'csisolatin3' : 'iso8859_3', - 'iso_8859_3' : 'iso8859_3', - 'iso_8859_3_1988' : 'iso8859_3', - 'iso_ir_109' : 'iso8859_3', - 'l3' : 'iso8859_3', - 'latin3' : 'iso8859_3', - - # iso8859_4 codec - 'csisolatin4' : 'iso8859_4', - 'iso_8859_4' : 'iso8859_4', - 'iso_8859_4_1988' : 'iso8859_4', - 'iso_ir_110' : 'iso8859_4', - 'l4' : 'iso8859_4', - 'latin4' : 'iso8859_4', - - # iso8859_5 codec - 'csisolatincyrillic' : 'iso8859_5', - 'cyrillic' : 'iso8859_5', - 'iso_8859_5' : 'iso8859_5', - 'iso_8859_5_1988' : 'iso8859_5', - 'iso_ir_144' : 'iso8859_5', - - # iso8859_6 codec - 'arabic' : 'iso8859_6', - 'asmo_708' : 'iso8859_6', - 'csisolatinarabic' : 'iso8859_6', - 'ecma_114' : 'iso8859_6', - 'iso_8859_6' : 'iso8859_6', - 'iso_8859_6_1987' : 'iso8859_6', - 'iso_ir_127' : 'iso8859_6', - - # iso8859_7 codec - 'csisolatingreek' : 'iso8859_7', - 'ecma_118' : 'iso8859_7', - 'elot_928' : 'iso8859_7', - 'greek' : 'iso8859_7', - 'greek8' : 'iso8859_7', - 'iso_8859_7' : 'iso8859_7', - 'iso_8859_7_1987' : 'iso8859_7', - 'iso_ir_126' : 'iso8859_7', - - # iso8859_8 codec - 'csisolatinhebrew' : 'iso8859_8', - 'hebrew' : 'iso8859_8', - 'iso_8859_8' : 'iso8859_8', - 'iso_8859_8_1988' : 'iso8859_8', - 'iso_ir_138' : 'iso8859_8', - - # iso8859_9 codec - 'csisolatin5' : 'iso8859_9', - 'iso_8859_9' : 'iso8859_9', - 'iso_8859_9_1989' : 'iso8859_9', - 'iso_ir_148' : 'iso8859_9', - 'l5' : 'iso8859_9', - 'latin5' : 'iso8859_9', - - # johab codec - 'cp1361' : 'johab', - 'ms1361' : 'johab', - - # koi8_r codec - 'cskoi8r' : 'koi8_r', - - # latin_1 codec - # - # Note that the latin_1 codec is implemented internally in C and a - # lot faster than the charmap codec iso8859_1 which uses the same - # encoding. This is why we discourage the use of the iso8859_1 - # codec and alias it to latin_1 instead. 
- # - '8859' : 'latin_1', - 'cp819' : 'latin_1', - 'csisolatin1' : 'latin_1', - 'ibm819' : 'latin_1', - 'iso8859' : 'latin_1', - 'iso8859_1' : 'latin_1', - 'iso_8859_1' : 'latin_1', - 'iso_8859_1_1987' : 'latin_1', - 'iso_ir_100' : 'latin_1', - 'l1' : 'latin_1', - 'latin' : 'latin_1', - 'latin1' : 'latin_1', - - # mac_cyrillic codec - 'maccyrillic' : 'mac_cyrillic', - - # mac_greek codec - 'macgreek' : 'mac_greek', - - # mac_iceland codec - 'maciceland' : 'mac_iceland', - - # mac_latin2 codec - 'maccentraleurope' : 'mac_latin2', - 'maclatin2' : 'mac_latin2', - - # mac_roman codec - 'macroman' : 'mac_roman', - - # mac_turkish codec - 'macturkish' : 'mac_turkish', - - # mbcs codec - 'dbcs' : 'mbcs', - - # ptcp154 codec - 'csptcp154' : 'ptcp154', - 'pt154' : 'ptcp154', - 'cp154' : 'ptcp154', - 'cyrillic-asian' : 'ptcp154', - - # quopri_codec codec - 'quopri' : 'quopri_codec', - 'quoted_printable' : 'quopri_codec', - 'quotedprintable' : 'quopri_codec', - - # rot_13 codec - 'rot13' : 'rot_13', - - # shift_jis codec - 'csshiftjis' : 'shift_jis', - 'shiftjis' : 'shift_jis', - 'sjis' : 'shift_jis', - 's_jis' : 'shift_jis', - - # shift_jis_2004 codec - 'shiftjis2004' : 'shift_jis_2004', - 'sjis_2004' : 'shift_jis_2004', - 's_jis_2004' : 'shift_jis_2004', - - # shift_jisx0213 codec - 'shiftjisx0213' : 'shift_jisx0213', - 'sjisx0213' : 'shift_jisx0213', - 's_jisx0213' : 'shift_jisx0213', - - # tactis codec - 'tis260' : 'tactis', - - # tis_620 codec - 'tis620' : 'tis_620', - 'tis_620_0' : 'tis_620', - 'tis_620_2529_0' : 'tis_620', - 'tis_620_2529_1' : 'tis_620', - 'iso_ir_166' : 'tis_620', - - # utf_16 codec - 'u16' : 'utf_16', - 'utf16' : 'utf_16', - - # utf_16_be codec - 'unicodebigunmarked' : 'utf_16_be', - 'utf_16be' : 'utf_16_be', - - # utf_16_le codec - 'unicodelittleunmarked' : 'utf_16_le', - 'utf_16le' : 'utf_16_le', - - # utf_32 codec - 'u32' : 'utf_32', - 'utf32' : 'utf_32', - - # utf_32_be codec - 'utf_32be' : 'utf_32_be', - - # utf_32_le codec - 'utf_32le' : 'utf_32_le', - - # utf_7 codec - 'u7' : 'utf_7', - 'utf7' : 'utf_7', - 'unicode_1_1_utf_7' : 'utf_7', - - # utf_8 codec - 'u8' : 'utf_8', - 'utf' : 'utf_8', - 'utf8' : 'utf_8', - 'utf8_ucs2' : 'utf_8', - 'utf8_ucs4' : 'utf_8', - - # uu_codec codec - 'uu' : 'uu_codec', - - # zlib_codec codec - 'zip' : 'zlib_codec', - 'zlib' : 'zlib_codec', - -} diff --git a/icarus-miner/data/usr/lib/python2.6/encodings/aliases.pyc b/icarus-miner/data/usr/lib/python2.6/encodings/aliases.pyc deleted file mode 100644 index 97af325..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/encodings/aliases.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/encodings/ascii.py b/icarus-miner/data/usr/lib/python2.6/encodings/ascii.py deleted file mode 100644 index 2033cde..0000000 --- a/icarus-miner/data/usr/lib/python2.6/encodings/ascii.py +++ /dev/null @@ -1,50 +0,0 @@ -""" Python 'ascii' Codec - - -Written by Marc-Andre Lemburg (mal@lemburg.com). - -(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. - -""" -import codecs - -### Codec APIs - -class Codec(codecs.Codec): - - # Note: Binding these as C functions will result in the class not - # converting them to methods. This is intended. 
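The alias table above is what lets codecs.lookup() accept the many IANA spellings of an encoding name: lookup() normalizes the spelling, the search function maps it through `aliases`, and the named codec module is imported. A small sketch using only the ascii aliases listed above:

import codecs

# All of these normalize to keys that the aliases dict maps to 'ascii',
# so they all resolve to the same CodecInfo entry.
for spelling in ("US-ASCII", "ANSI_X3.4-1968", "646"):
    assert codecs.lookup(spelling).name == "ascii"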
- encode = codecs.ascii_encode - decode = codecs.ascii_decode - -class IncrementalEncoder(codecs.IncrementalEncoder): - def encode(self, input, final=False): - return codecs.ascii_encode(input, self.errors)[0] - -class IncrementalDecoder(codecs.IncrementalDecoder): - def decode(self, input, final=False): - return codecs.ascii_decode(input, self.errors)[0] - -class StreamWriter(Codec,codecs.StreamWriter): - pass - -class StreamReader(Codec,codecs.StreamReader): - pass - -class StreamConverter(StreamWriter,StreamReader): - - encode = codecs.ascii_decode - decode = codecs.ascii_encode - -### encodings module API - -def getregentry(): - return codecs.CodecInfo( - name='ascii', - encode=Codec.encode, - decode=Codec.decode, - incrementalencoder=IncrementalEncoder, - incrementaldecoder=IncrementalDecoder, - streamwriter=StreamWriter, - streamreader=StreamReader, - ) diff --git a/icarus-miner/data/usr/lib/python2.6/encodings/ascii.pyc b/icarus-miner/data/usr/lib/python2.6/encodings/ascii.pyc deleted file mode 100644 index 7f2aab9..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/encodings/ascii.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/encodings/hex_codec.py b/icarus-miner/data/usr/lib/python2.6/encodings/hex_codec.py deleted file mode 100644 index 91b38d9..0000000 --- a/icarus-miner/data/usr/lib/python2.6/encodings/hex_codec.py +++ /dev/null @@ -1,79 +0,0 @@ -""" Python 'hex_codec' Codec - 2-digit hex content transfer encoding - - Unlike most of the other codecs which target Unicode, this codec - will return Python string objects for both encode and decode. - - Written by Marc-Andre Lemburg (mal@lemburg.com). - -""" -import codecs, binascii - -### Codec APIs - -def hex_encode(input,errors='strict'): - - """ Encodes the object input and returns a tuple (output - object, length consumed). - - errors defines the error handling to apply. It defaults to - 'strict' handling which is the only currently supported - error handling for this codec. - - """ - assert errors == 'strict' - output = binascii.b2a_hex(input) - return (output, len(input)) - -def hex_decode(input,errors='strict'): - - """ Decodes the object input and returns a tuple (output - object, length consumed). - - input must be an object which provides the bf_getreadbuf - buffer slot. Python strings, buffer objects and memory - mapped files are examples of objects providing this slot. - - errors defines the error handling to apply. It defaults to - 'strict' handling which is the only currently supported - error handling for this codec. 
- - """ - assert errors == 'strict' - output = binascii.a2b_hex(input) - return (output, len(input)) - -class Codec(codecs.Codec): - - def encode(self, input,errors='strict'): - return hex_encode(input,errors) - def decode(self, input,errors='strict'): - return hex_decode(input,errors) - -class IncrementalEncoder(codecs.IncrementalEncoder): - def encode(self, input, final=False): - assert self.errors == 'strict' - return binascii.b2a_hex(input) - -class IncrementalDecoder(codecs.IncrementalDecoder): - def decode(self, input, final=False): - assert self.errors == 'strict' - return binascii.a2b_hex(input) - -class StreamWriter(Codec,codecs.StreamWriter): - pass - -class StreamReader(Codec,codecs.StreamReader): - pass - -### encodings module API - -def getregentry(): - return codecs.CodecInfo( - name='hex', - encode=hex_encode, - decode=hex_decode, - incrementalencoder=IncrementalEncoder, - incrementaldecoder=IncrementalDecoder, - streamwriter=StreamWriter, - streamreader=StreamReader, - ) diff --git a/icarus-miner/data/usr/lib/python2.6/encodings/hex_codec.pyc b/icarus-miner/data/usr/lib/python2.6/encodings/hex_codec.pyc deleted file mode 100644 index 9cf6254..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/encodings/hex_codec.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/functools.py b/icarus-miner/data/usr/lib/python2.6/functools.py deleted file mode 100644 index a54f030..0000000 --- a/icarus-miner/data/usr/lib/python2.6/functools.py +++ /dev/null @@ -1,51 +0,0 @@ -"""functools.py - Tools for working with functions and callable objects -""" -# Python module wrapper for _functools C module -# to allow utilities written in Python to be added -# to the functools module. -# Written by Nick Coghlan -# Copyright (C) 2006 Python Software Foundation. -# See C source code for _functools credits/copyright - -from _functools import partial, reduce - -# update_wrapper() and wraps() are tools to help write -# wrapper functions that can handle naive introspection - -WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__doc__') -WRAPPER_UPDATES = ('__dict__',) -def update_wrapper(wrapper, - wrapped, - assigned = WRAPPER_ASSIGNMENTS, - updated = WRAPPER_UPDATES): - """Update a wrapper function to look like the wrapped function - - wrapper is the function to be updated - wrapped is the original function - assigned is a tuple naming the attributes assigned directly - from the wrapped function to the wrapper function (defaults to - functools.WRAPPER_ASSIGNMENTS) - updated is a tuple naming the attributes of the wrapper that - are updated with the corresponding attribute from the wrapped - function (defaults to functools.WRAPPER_UPDATES) - """ - for attr in assigned: - setattr(wrapper, attr, getattr(wrapped, attr)) - for attr in updated: - getattr(wrapper, attr).update(getattr(wrapped, attr, {})) - # Return the wrapper so this can be used as a decorator via partial() - return wrapper - -def wraps(wrapped, - assigned = WRAPPER_ASSIGNMENTS, - updated = WRAPPER_UPDATES): - """Decorator factory to apply update_wrapper() to a wrapper function - - Returns a decorator that invokes update_wrapper() with the decorated - function as the wrapper argument and the arguments to wraps() as the - remaining arguments. Default arguments are as for update_wrapper(). - This is a convenience function to simplify applying partial() to - update_wrapper(). 
- """ - return partial(update_wrapper, wrapped=wrapped, - assigned=assigned, updated=updated) diff --git a/icarus-miner/data/usr/lib/python2.6/functools.pyc b/icarus-miner/data/usr/lib/python2.6/functools.pyc deleted file mode 100644 index 09ed969..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/functools.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/genericpath.py b/icarus-miner/data/usr/lib/python2.6/genericpath.py deleted file mode 100644 index 73d7b26..0000000 --- a/icarus-miner/data/usr/lib/python2.6/genericpath.py +++ /dev/null @@ -1,105 +0,0 @@ -""" -Path operations common to more than one OS -Do not use directly. The OS specific modules import the appropriate -functions from this module themselves. -""" -import os -import stat - -__all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime', - 'getsize', 'isdir', 'isfile'] - - -# Does a path exist? -# This is false for dangling symbolic links on systems that support them. -def exists(path): - """Test whether a path exists. Returns False for broken symbolic links""" - try: - st = os.stat(path) - except os.error: - return False - return True - - -# This follows symbolic links, so both islink() and isdir() can be true -# for the same path ono systems that support symlinks -def isfile(path): - """Test whether a path is a regular file""" - try: - st = os.stat(path) - except os.error: - return False - return stat.S_ISREG(st.st_mode) - - -# Is a path a directory? -# This follows symbolic links, so both islink() and isdir() -# can be true for the same path on systems that support symlinks -def isdir(s): - """Return true if the pathname refers to an existing directory.""" - try: - st = os.stat(s) - except os.error: - return False - return stat.S_ISDIR(st.st_mode) - - -def getsize(filename): - """Return the size of a file, reported by os.stat().""" - return os.stat(filename).st_size - - -def getmtime(filename): - """Return the last modification time of a file, reported by os.stat().""" - return os.stat(filename).st_mtime - - -def getatime(filename): - """Return the last access time of a file, reported by os.stat().""" - return os.stat(filename).st_atime - - -def getctime(filename): - """Return the metadata change time of a file, reported by os.stat().""" - return os.stat(filename).st_ctime - - -# Return the longest prefix of all list elements. -def commonprefix(m): - "Given a list of pathnames, returns the longest common leading component" - if not m: return '' - s1 = min(m) - s2 = max(m) - for i, c in enumerate(s1): - if c != s2[i]: - return s1[:i] - return s1 - -# Split a path in root and extension. -# The extension is everything starting at the last dot in the last -# pathname component; the root is everything before that. -# It is always true that root + ext == p. - -# Generic implementation of splitext, to be parametrized with -# the separators -def _splitext(p, sep, altsep, extsep): - """Split the extension from a pathname. - - Extension is everything from the last dot to the end, ignoring - leading dots. 
Returns "(root, ext)"; ext may be empty.""" - - sepIndex = p.rfind(sep) - if altsep: - altsepIndex = p.rfind(altsep) - sepIndex = max(sepIndex, altsepIndex) - - dotIndex = p.rfind(extsep) - if dotIndex > sepIndex: - # skip all leading dots - filenameIndex = sepIndex + 1 - while filenameIndex < dotIndex: - if p[filenameIndex] != extsep: - return p[:dotIndex], p[dotIndex:] - filenameIndex += 1 - - return p, '' diff --git a/icarus-miner/data/usr/lib/python2.6/genericpath.pyc b/icarus-miner/data/usr/lib/python2.6/genericpath.pyc deleted file mode 100644 index 1534d90..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/genericpath.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/heapq.py b/icarus-miner/data/usr/lib/python2.6/heapq.py deleted file mode 100644 index b36aabd..0000000 --- a/icarus-miner/data/usr/lib/python2.6/heapq.py +++ /dev/null @@ -1,393 +0,0 @@ -# -*- coding: Latin-1 -*- - -"""Heap queue algorithm (a.k.a. priority queue). - -Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for -all k, counting elements from 0. For the sake of comparison, -non-existing elements are considered to be infinite. The interesting -property of a heap is that a[0] is always its smallest element. - -Usage: - -heap = [] # creates an empty heap -heappush(heap, item) # pushes a new item on the heap -item = heappop(heap) # pops the smallest item from the heap -item = heap[0] # smallest item on the heap without popping it -heapify(x) # transforms list into a heap, in-place, in linear time -item = heapreplace(heap, item) # pops and returns smallest item, and adds - # new item; the heap size is unchanged - -Our API differs from textbook heap algorithms as follows: - -- We use 0-based indexing. This makes the relationship between the - index for a node and the indexes for its children slightly less - obvious, but is more suitable since Python uses 0-based indexing. - -- Our heappop() method returns the smallest item, not the largest. - -These two make it possible to view the heap as a regular Python list -without surprises: heap[0] is the smallest item, and heap.sort() -maintains the heap invariant! -""" - -# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger - -__about__ = """Heap queues - -[explanation by François Pinard] - -Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for -all k, counting elements from 0. For the sake of comparison, -non-existing elements are considered to be infinite. The interesting -property of a heap is that a[0] is always its smallest element. - -The strange invariant above is meant to be an efficient memory -representation for a tournament. The numbers below are `k', not a[k]: - - 0 - - 1 2 - - 3 4 5 6 - - 7 8 9 10 11 12 13 14 - - 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 - - -In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In -an usual binary tournament we see in sports, each cell is the winner -over the two cells it tops, and we can trace the winner down the tree -to see all opponents s/he had. However, in many computer applications -of such tournaments, we do not need to trace the history of a winner. -To be more memory efficient, when a winner is promoted, we try to -replace it by something else at a lower level, and the rule becomes -that a cell and the two cells it tops contain three different items, -but the top cell "wins" over the two topped cells. - -If this heap invariant is protected at all time, index 0 is clearly -the overall winner. 
The simplest algorithmic way to remove it and -find the "next" winner is to move some loser (let's say cell 30 in the -diagram above) into the 0 position, and then percolate this new 0 down -the tree, exchanging values, until the invariant is re-established. -This is clearly logarithmic on the total number of items in the tree. -By iterating over all items, you get an O(n ln n) sort. - -A nice feature of this sort is that you can efficiently insert new -items while the sort is going on, provided that the inserted items are -not "better" than the last 0'th element you extracted. This is -especially useful in simulation contexts, where the tree holds all -incoming events, and the "win" condition means the smallest scheduled -time. When an event schedules other events for execution, they are -scheduled into the future, so they can easily go into the heap. So, a -heap is a good structure for implementing schedulers (this is what I -used for my MIDI sequencer :-). - -Various structures for implementing schedulers have been extensively -studied, and heaps are good for this, as they are reasonably speedy, -the speed is almost constant, and the worst case is not much different -than the average case. However, there are other representations which -are more efficient overall, yet the worst cases might be terrible. - -Heaps are also very useful in big disk sorts. You most probably all -know that a big sort implies producing "runs" (which are pre-sorted -sequences, whose size is usually related to the amount of CPU memory), -followed by merging passes for these runs, which merging is often -very cleverly organised[1]. It is very important that the initial -sort produces the longest runs possible. Tournaments are a good way -to do that. If, using all the memory available to hold a tournament, you -replace and percolate items that happen to fit the current run, you'll -produce runs which are twice the size of the memory for random input, -and much better for input fuzzily ordered. - -Moreover, if you output the 0'th item on disk and get an input which -may not fit in the current tournament (because the value "wins" over -the last output value), it cannot fit in the heap, so the size of the -heap decreases. The freed memory could be cleverly reused immediately -for progressively building a second heap, which grows at exactly the -same rate the first heap is melting. When the first heap completely -vanishes, you switch heaps and start a new run. Clever and quite -effective! - -In a word, heaps are useful memory structures to know. I use them in -a few applications, and I think it is good to keep a `heap' module -around. :-) - --------------------- -[1] The disk balancing algorithms which are current, nowadays, are -more annoying than clever, and this is a consequence of the seeking -capabilities of the disks. On devices which cannot seek, like big -tape drives, the story was quite different, and one had to be very -clever to ensure (far in advance) that each tape movement will be the -most effective possible (that is, will best participate at -"progressing" the merge). Some tapes were even able to read -backwards, and this was also used to avoid the rewinding time. -Believe me, real good tape sorts were quite spectacular to watch! -From all times, sorting has always been a Great Art!
:-) -""" - -__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge', - 'nlargest', 'nsmallest', 'heappushpop'] - -from itertools import islice, repeat, count, imap, izip, tee -from operator import itemgetter, neg -import bisect - -def heappush(heap, item): - """Push item onto heap, maintaining the heap invariant.""" - heap.append(item) - _siftdown(heap, 0, len(heap)-1) - -def heappop(heap): - """Pop the smallest item off the heap, maintaining the heap invariant.""" - lastelt = heap.pop() # raises appropriate IndexError if heap is empty - if heap: - returnitem = heap[0] - heap[0] = lastelt - _siftup(heap, 0) - else: - returnitem = lastelt - return returnitem - -def heapreplace(heap, item): - """Pop and return the current smallest value, and add the new item. - - This is more efficient than heappop() followed by heappush(), and can be - more appropriate when using a fixed-size heap. Note that the value - returned may be larger than item! That constrains reasonable uses of - this routine unless written as part of a conditional replacement: - - if item > heap[0]: - item = heapreplace(heap, item) - """ - returnitem = heap[0] # raises appropriate IndexError if heap is empty - heap[0] = item - _siftup(heap, 0) - return returnitem - -def heappushpop(heap, item): - """Fast version of a heappush followed by a heappop.""" - if heap and heap[0] < item: - item, heap[0] = heap[0], item - _siftup(heap, 0) - return item - -def heapify(x): - """Transform list into a heap, in-place, in O(len(heap)) time.""" - n = len(x) - # Transform bottom-up. The largest index there's any point to looking at - # is the largest with a child index in-range, so must have 2*i + 1 < n, - # or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so - # j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is - # (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1. - for i in reversed(xrange(n//2)): - _siftup(x, i) - -def nlargest(n, iterable): - """Find the n largest elements in a dataset. - - Equivalent to: sorted(iterable, reverse=True)[:n] - """ - it = iter(iterable) - result = list(islice(it, n)) - if not result: - return result - heapify(result) - _heappushpop = heappushpop - for elem in it: - _heappushpop(result, elem) - result.sort(reverse=True) - return result - -def nsmallest(n, iterable): - """Find the n smallest elements in a dataset. - - Equivalent to: sorted(iterable)[:n] - """ - if hasattr(iterable, '__len__') and n * 10 <= len(iterable): - # For smaller values of n, the bisect method is faster than a minheap. - # It is also memory efficient, consuming only n elements of space. - it = iter(iterable) - result = sorted(islice(it, 0, n)) - if not result: - return result - insort = bisect.insort - pop = result.pop - los = result[-1] # los --> Largest of the nsmallest - for elem in it: - if los <= elem: - continue - insort(result, elem) - pop() - los = result[-1] - return result - # An alternative approach manifests the whole iterable in memory but - # saves comparisons by heapifying all at once. Also, saves time - # over bisect.insort() which has O(n) data movement time for every - # insertion. Finding the n smallest of an m length iterable requires - # O(m) + O(n log m) comparisons. - h = list(iterable) - heapify(h) - return map(heappop, repeat(h, min(n, len(h)))) - -# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos -# is the index of a leaf with a possibly out-of-order value. Restore the -# heap invariant. 
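The public API documented above amounts to: push items in any order, then pop them back in ascending order (heapsort). A compact sketch with illustrative data:

from heapq import heappush, heappop

heap = []
for item in [5, 1, 4, 2, 3]:
    heappush(heap, item)          # O(log n) per push
# range(len(heap)) is evaluated once, before the pops shrink the heap.
assert [heappop(heap) for _ in range(len(heap))] == [1, 2, 3, 4, 5]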
-def _siftdown(heap, startpos, pos): - newitem = heap[pos] - # Follow the path to the root, moving parents down until finding a place - # newitem fits. - while pos > startpos: - parentpos = (pos - 1) >> 1 - parent = heap[parentpos] - if newitem < parent: - heap[pos] = parent - pos = parentpos - continue - break - heap[pos] = newitem - -# The child indices of heap index pos are already heaps, and we want to make -# a heap at index pos too. We do this by bubbling the smaller child of -# pos up (and so on with that child's children, etc) until hitting a leaf, -# then using _siftdown to move the oddball originally at index pos into place. -# -# We *could* break out of the loop as soon as we find a pos where newitem <= -# both its children, but turns out that's not a good idea, and despite that -# many books write the algorithm that way. During a heap pop, the last array -# element is sifted in, and that tends to be large, so that comparing it -# against values starting from the root usually doesn't pay (= usually doesn't -# get us out of the loop early). See Knuth, Volume 3, where this is -# explained and quantified in an exercise. -# -# Cutting the # of comparisons is important, since these routines have no -# way to extract "the priority" from an array element, so that intelligence -# is likely to be hiding in custom __cmp__ methods, or in array elements -# storing (priority, record) tuples. Comparisons are thus potentially -# expensive. -# -# On random arrays of length 1000, making this change cut the number of -# comparisons made by heapify() a little, and those made by exhaustive -# heappop() a lot, in accord with theory. Here are typical results from 3 -# runs (3 just to demonstrate how small the variance is): -# -# Compares needed by heapify Compares needed by 1000 heappops -# -------------------------- -------------------------------- -# 1837 cut to 1663 14996 cut to 8680 -# 1855 cut to 1659 14966 cut to 8678 -# 1847 cut to 1660 15024 cut to 8703 -# -# Building the heap by using heappush() 1000 times instead required -# 2198, 2148, and 2219 compares: heapify() is more efficient, when -# you can use it. -# -# The total compares needed by list.sort() on the same lists were 8627, -# 8627, and 8632 (this should be compared to the sum of heapify() and -# heappop() compares): list.sort() is (unsurprisingly!) more efficient -# for sorting. - -def _siftup(heap, pos): - endpos = len(heap) - startpos = pos - newitem = heap[pos] - # Bubble up the smaller child until hitting a leaf. - childpos = 2*pos + 1 # leftmost child position - while childpos < endpos: - # Set childpos to index of smaller child. - rightpos = childpos + 1 - if rightpos < endpos and not heap[childpos] < heap[rightpos]: - childpos = rightpos - # Move the smaller child up. - heap[pos] = heap[childpos] - pos = childpos - childpos = 2*pos + 1 - # The leaf at pos is empty now. Put newitem there, and bubble it up - # to its final resting place (by sifting its parents down). - heap[pos] = newitem - _siftdown(heap, startpos, pos) - -# If available, use C implementation -try: - from _heapq import heappush, heappop, heapify, heapreplace, nlargest, nsmallest, heappushpop -except ImportError: - pass - -def merge(*iterables): - '''Merge multiple sorted inputs into a single sorted output. - - Similar to sorted(itertools.chain(*iterables)) but returns a generator, - does not pull the data into memory all at once, and assumes that each of - the input streams is already sorted (smallest to largest). 
- - >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25])) - [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25] - - ''' - _heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration - - h = [] - h_append = h.append - for itnum, it in enumerate(map(iter, iterables)): - try: - next = it.next - h_append([next(), itnum, next]) - except _StopIteration: - pass - heapify(h) - - while 1: - try: - while 1: - v, itnum, next = s = h[0] # raises IndexError when h is empty - yield v - s[0] = next() # raises StopIteration when exhausted - _heapreplace(h, s) # restore heap condition - except _StopIteration: - _heappop(h) # remove empty iterator - except IndexError: - return - -# Extend the implementations of nsmallest and nlargest to use a key= argument -_nsmallest = nsmallest -def nsmallest(n, iterable, key=None): - """Find the n smallest elements in a dataset. - - Equivalent to: sorted(iterable, key=key)[:n] - """ - if key is None: - it = izip(iterable, count()) # decorate - result = _nsmallest(n, it) - return map(itemgetter(0), result) # undecorate - in1, in2 = tee(iterable) - it = izip(imap(key, in1), count(), in2) # decorate - result = _nsmallest(n, it) - return map(itemgetter(2), result) # undecorate - -_nlargest = nlargest -def nlargest(n, iterable, key=None): - """Find the n largest elements in a dataset. - - Equivalent to: sorted(iterable, key=key, reverse=True)[:n] - """ - if key is None: - it = izip(iterable, imap(neg, count())) # decorate - result = _nlargest(n, it) - return map(itemgetter(0), result) # undecorate - in1, in2 = tee(iterable) - it = izip(imap(key, in1), imap(neg, count()), in2) # decorate - result = _nlargest(n, it) - return map(itemgetter(2), result) # undecorate - -if __name__ == "__main__": - # Simple sanity test - heap = [] - data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0] - for item in data: - heappush(heap, item) - sort = [] - while heap: - sort.append(heappop(heap)) - print sort - - import doctest - doctest.testmod() diff --git a/icarus-miner/data/usr/lib/python2.6/heapq.pyc b/icarus-miner/data/usr/lib/python2.6/heapq.pyc deleted file mode 100644 index 9f3471f..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/heapq.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/hex_codec.py b/icarus-miner/data/usr/lib/python2.6/hex_codec.py deleted file mode 100644 index 91b38d9..0000000 --- a/icarus-miner/data/usr/lib/python2.6/hex_codec.py +++ /dev/null @@ -1,79 +0,0 @@ -""" Python 'hex_codec' Codec - 2-digit hex content transfer encoding - - Unlike most of the other codecs which target Unicode, this codec - will return Python string objects for both encode and decode. - - Written by Marc-Andre Lemburg (mal@lemburg.com). - -""" -import codecs, binascii - -### Codec APIs - -def hex_encode(input,errors='strict'): - - """ Encodes the object input and returns a tuple (output - object, length consumed). - - errors defines the error handling to apply. It defaults to - 'strict' handling which is the only currently supported - error handling for this codec. - - """ - assert errors == 'strict' - output = binascii.b2a_hex(input) - return (output, len(input)) - -def hex_decode(input,errors='strict'): - - """ Decodes the object input and returns a tuple (output - object, length consumed). - - input must be an object which provides the bf_getreadbuf - buffer slot. Python strings, buffer objects and memory - mapped files are examples of objects providing this slot. - - errors defines the error handling to apply. 
It defaults to - 'strict' handling which is the only currently supported - error handling for this codec. - - """ - assert errors == 'strict' - output = binascii.a2b_hex(input) - return (output, len(input)) - -class Codec(codecs.Codec): - - def encode(self, input,errors='strict'): - return hex_encode(input,errors) - def decode(self, input,errors='strict'): - return hex_decode(input,errors) - -class IncrementalEncoder(codecs.IncrementalEncoder): - def encode(self, input, final=False): - assert self.errors == 'strict' - return binascii.b2a_hex(input) - -class IncrementalDecoder(codecs.IncrementalDecoder): - def decode(self, input, final=False): - assert self.errors == 'strict' - return binascii.a2b_hex(input) - -class StreamWriter(Codec,codecs.StreamWriter): - pass - -class StreamReader(Codec,codecs.StreamReader): - pass - -### encodings module API - -def getregentry(): - return codecs.CodecInfo( - name='hex', - encode=hex_encode, - decode=hex_decode, - incrementalencoder=IncrementalEncoder, - incrementaldecoder=IncrementalDecoder, - streamwriter=StreamWriter, - streamreader=StreamReader, - ) diff --git a/icarus-miner/data/usr/lib/python2.6/hex_codec.pyc b/icarus-miner/data/usr/lib/python2.6/hex_codec.pyc deleted file mode 100644 index d056e89..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/hex_codec.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/httplib.py b/icarus-miner/data/usr/lib/python2.6/httplib.py deleted file mode 100644 index 3623bc3..0000000 --- a/icarus-miner/data/usr/lib/python2.6/httplib.py +++ /dev/null @@ -1,1327 +0,0 @@ -"""HTTP/1.1 client library - - - - -HTTPConnection goes through a number of "states", which define when a client -may legally make another request or fetch the response for a particular -request. This diagram details these state transitions: - - (null) - | - | HTTPConnection() - v - Idle - | - | putrequest() - v - Request-started - | - | ( putheader() )* endheaders() - v - Request-sent - | - | response = getresponse() - v - Unread-response [Response-headers-read] - |\____________________ - | | - | response.read() | putrequest() - v v - Idle Req-started-unread-response - ______/| - / | - response.read() | | ( putheader() )* endheaders() - v v - Request-started Req-sent-unread-response - | - | response.read() - v - Request-sent - -This diagram presents the following rules: - -- a second request may not be started until {response-headers-read} - -- a response [object] cannot be retrieved until {request-sent} - -- there is no differentiation between an unread response body and a - partially read response body - -Note: this enforcement is applied by the HTTPConnection class. The - HTTPResponse class does not enforce this state machine, which - implies sophisticated clients may accelerate the request/response - pipeline. Caution should be taken, though: accelerating the states - beyond the above pattern may imply knowledge of the server's - connection-close behavior for certain requests. For example, it - is impossible to tell whether the server will close the connection - UNTIL the response headers have been read; this means that further - requests cannot be placed into the pipeline until it is known that - the server will NOT be closing the connection. 
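The legal state sequence described above maps onto a few calls on HTTPConnection; request() bundles putrequest(), putheader() and endheaders(). A minimal sketch, assuming network access; the host name is a placeholder:

import httplib

conn = httplib.HTTPConnection("example.org")
conn.request("GET", "/")          # Idle -> Request-started -> Request-sent
resp = conn.getresponse()         # Unread-response
print resp.status, resp.reason    # e.g. 200 OK
body = resp.read()                # back to Idle; a new request may start
conn.close()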
- -Logical State __state __response -------------- ------- ---------- -Idle _CS_IDLE None -Request-started _CS_REQ_STARTED None -Request-sent _CS_REQ_SENT None -Unread-response _CS_IDLE -Req-started-unread-response _CS_REQ_STARTED -Req-sent-unread-response _CS_REQ_SENT -""" - -from array import array -import socket -from sys import py3kwarning -from urlparse import urlsplit -import warnings -with warnings.catch_warnings(): - if py3kwarning: - warnings.filterwarnings("ignore", ".*mimetools has been removed", - DeprecationWarning) - import mimetools - -try: - from cStringIO import StringIO -except ImportError: - from StringIO import StringIO - -__all__ = ["HTTP", "HTTPResponse", "HTTPConnection", - "HTTPException", "NotConnected", "UnknownProtocol", - "UnknownTransferEncoding", "UnimplementedFileMode", - "IncompleteRead", "InvalidURL", "ImproperConnectionState", - "CannotSendRequest", "CannotSendHeader", "ResponseNotReady", - "BadStatusLine", "error", "responses"] - -HTTP_PORT = 80 -HTTPS_PORT = 443 - -_UNKNOWN = 'UNKNOWN' - -# connection states -_CS_IDLE = 'Idle' -_CS_REQ_STARTED = 'Request-started' -_CS_REQ_SENT = 'Request-sent' - -# status codes -# informational -CONTINUE = 100 -SWITCHING_PROTOCOLS = 101 -PROCESSING = 102 - -# successful -OK = 200 -CREATED = 201 -ACCEPTED = 202 -NON_AUTHORITATIVE_INFORMATION = 203 -NO_CONTENT = 204 -RESET_CONTENT = 205 -PARTIAL_CONTENT = 206 -MULTI_STATUS = 207 -IM_USED = 226 - -# redirection -MULTIPLE_CHOICES = 300 -MOVED_PERMANENTLY = 301 -FOUND = 302 -SEE_OTHER = 303 -NOT_MODIFIED = 304 -USE_PROXY = 305 -TEMPORARY_REDIRECT = 307 - -# client error -BAD_REQUEST = 400 -UNAUTHORIZED = 401 -PAYMENT_REQUIRED = 402 -FORBIDDEN = 403 -NOT_FOUND = 404 -METHOD_NOT_ALLOWED = 405 -NOT_ACCEPTABLE = 406 -PROXY_AUTHENTICATION_REQUIRED = 407 -REQUEST_TIMEOUT = 408 -CONFLICT = 409 -GONE = 410 -LENGTH_REQUIRED = 411 -PRECONDITION_FAILED = 412 -REQUEST_ENTITY_TOO_LARGE = 413 -REQUEST_URI_TOO_LONG = 414 -UNSUPPORTED_MEDIA_TYPE = 415 -REQUESTED_RANGE_NOT_SATISFIABLE = 416 -EXPECTATION_FAILED = 417 -UNPROCESSABLE_ENTITY = 422 -LOCKED = 423 -FAILED_DEPENDENCY = 424 -UPGRADE_REQUIRED = 426 - -# server error -INTERNAL_SERVER_ERROR = 500 -NOT_IMPLEMENTED = 501 -BAD_GATEWAY = 502 -SERVICE_UNAVAILABLE = 503 -GATEWAY_TIMEOUT = 504 -HTTP_VERSION_NOT_SUPPORTED = 505 -INSUFFICIENT_STORAGE = 507 -NOT_EXTENDED = 510 - -# Mapping status codes to official W3C names -responses = { - 100: 'Continue', - 101: 'Switching Protocols', - - 200: 'OK', - 201: 'Created', - 202: 'Accepted', - 203: 'Non-Authoritative Information', - 204: 'No Content', - 205: 'Reset Content', - 206: 'Partial Content', - - 300: 'Multiple Choices', - 301: 'Moved Permanently', - 302: 'Found', - 303: 'See Other', - 304: 'Not Modified', - 305: 'Use Proxy', - 306: '(Unused)', - 307: 'Temporary Redirect', - - 400: 'Bad Request', - 401: 'Unauthorized', - 402: 'Payment Required', - 403: 'Forbidden', - 404: 'Not Found', - 405: 'Method Not Allowed', - 406: 'Not Acceptable', - 407: 'Proxy Authentication Required', - 408: 'Request Timeout', - 409: 'Conflict', - 410: 'Gone', - 411: 'Length Required', - 412: 'Precondition Failed', - 413: 'Request Entity Too Large', - 414: 'Request-URI Too Long', - 415: 'Unsupported Media Type', - 416: 'Requested Range Not Satisfiable', - 417: 'Expectation Failed', - - 500: 'Internal Server Error', - 501: 'Not Implemented', - 502: 'Bad Gateway', - 503: 'Service Unavailable', - 504: 'Gateway Timeout', - 505: 'HTTP Version Not Supported', -} - -# maximal amount of data to read at one time in _safe_read 
-MAXAMOUNT = 1048576 - -class HTTPMessage(mimetools.Message): - - def addheader(self, key, value): - """Add header for field key handling repeats.""" - prev = self.dict.get(key) - if prev is None: - self.dict[key] = value - else: - combined = ", ".join((prev, value)) - self.dict[key] = combined - - def addcontinue(self, key, more): - """Add more field data from a continuation line.""" - prev = self.dict[key] - self.dict[key] = prev + "\n " + more - - def readheaders(self): - """Read header lines. - - Read header lines up to the entirely blank line that terminates them. - The (normally blank) line that ends the headers is skipped, but not - included in the returned list. If a non-header line ends the headers, - (which is an error), an attempt is made to backspace over it; it is - never included in the returned list. - - The variable self.status is set to the empty string if all went well, - otherwise it is an error message. The variable self.headers is a - completely uninterpreted list of lines contained in the header (so - printing them will reproduce the header exactly as it appears in the - file). - - If multiple header fields with the same name occur, they are combined - according to the rules in RFC 2616 sec 4.2: - - Appending each subsequent field-value to the first, each separated - by a comma. The order in which header fields with the same field-name - are received is significant to the interpretation of the combined - field value. - """ - # XXX The implementation overrides the readheaders() method of - # rfc822.Message. The base class design isn't amenable to - # customized behavior here so the method here is a copy of the - # base class code with a few small changes. - - self.dict = {} - self.unixfrom = '' - self.headers = hlist = [] - self.status = '' - headerseen = "" - firstline = 1 - startofline = unread = tell = None - if hasattr(self.fp, 'unread'): - unread = self.fp.unread - elif self.seekable: - tell = self.fp.tell - while True: - if tell: - try: - startofline = tell() - except IOError: - startofline = tell = None - self.seekable = 0 - line = self.fp.readline() - if not line: - self.status = 'EOF in headers' - break - # Skip unix From name time lines - if firstline and line.startswith('From '): - self.unixfrom = self.unixfrom + line - continue - firstline = 0 - if headerseen and line[0] in ' \t': - # XXX Not sure if continuation lines are handled properly - # for http and/or for repeating headers - # It's a continuation line. - hlist.append(line) - self.addcontinue(headerseen, line.strip()) - continue - elif self.iscomment(line): - # It's a comment. Ignore it. - continue - elif self.islast(line): - # Note! No pushback here! The delimiter line gets eaten. - break - headerseen = self.isheader(line) - if headerseen: - # It's a legal header line, save it. - hlist.append(line) - self.addheader(headerseen, line[len(headerseen)+1:].strip()) - continue - else: - # It's not a header line; throw it back and stop here. - if not self.dict: - self.status = 'No headers' - else: - self.status = 'Non-header line where header expected' - # Try to undo the read. - if unread: - unread(line) - elif tell: - self.fp.seek(startofline) - else: - self.status = self.status + '; bad seek' - break - -class HTTPResponse: - - # strict: If true, raise BadStatusLine if the status line can't be - # parsed as a valid HTTP/1.0 or 1.1 status line. By default it is - # false because it prevents clients from talking to HTTP/0.9 - # servers. 
Note that a response with a sufficiently corrupted - # status line will look like an HTTP/0.9 response. - - # See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details. - - def __init__(self, sock, debuglevel=0, strict=0, method=None): - self.fp = sock.makefile('rb', 0) - self.debuglevel = debuglevel - self.strict = strict - self._method = method - - self.msg = None - - # from the Status-Line of the response - self.version = _UNKNOWN # HTTP-Version - self.status = _UNKNOWN # Status-Code - self.reason = _UNKNOWN # Reason-Phrase - - self.chunked = _UNKNOWN # is "chunked" being used? - self.chunk_left = _UNKNOWN # bytes left to read in current chunk - self.length = _UNKNOWN # number of bytes left in response - self.will_close = _UNKNOWN # conn will close at end of response - - def _read_status(self): - # Initialize with Simple-Response defaults - line = self.fp.readline() - if self.debuglevel > 0: - print "reply:", repr(line) - if not line: - # Presumably, the server closed the connection before - # sending a valid response. - raise BadStatusLine(line) - try: - [version, status, reason] = line.split(None, 2) - except ValueError: - try: - [version, status] = line.split(None, 1) - reason = "" - except ValueError: - # empty version will cause next test to fail and status - # will be treated as 0.9 response. - version = "" - if not version.startswith('HTTP/'): - if self.strict: - self.close() - raise BadStatusLine(line) - else: - # assume it's a Simple-Response from an 0.9 server - self.fp = LineAndFileWrapper(line, self.fp) - return "HTTP/0.9", 200, "" - - # The status code is a three-digit number - try: - status = int(status) - if status < 100 or status > 999: - raise BadStatusLine(line) - except ValueError: - raise BadStatusLine(line) - return version, status, reason - - def begin(self): - if self.msg is not None: - # we've already started reading the response - return - - # read until we get a non-100 response - while True: - version, status, reason = self._read_status() - if status != CONTINUE: - break - # skip the header from the 100 response - while True: - skip = self.fp.readline().strip() - if not skip: - break - if self.debuglevel > 0: - print "header:", skip - - self.status = status - self.reason = reason.strip() - if version == 'HTTP/1.0': - self.version = 10 - elif version.startswith('HTTP/1.'): - self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1 - elif version == 'HTTP/0.9': - self.version = 9 - else: - raise UnknownProtocol(version) - - if self.version == 9: - self.length = None - self.chunked = 0 - self.will_close = 1 - self.msg = HTTPMessage(StringIO()) - return - - self.msg = HTTPMessage(self.fp, 0) - if self.debuglevel > 0: - for hdr in self.msg.headers: - print "header:", hdr, - - # don't let the msg keep an fp - self.msg.fp = None - - # are we using the chunked-style of transfer encoding? - tr_enc = self.msg.getheader('transfer-encoding') - if tr_enc and tr_enc.lower() == "chunked": - self.chunked = 1 - self.chunk_left = None - else: - self.chunked = 0 - - # will the connection close at the end of the response? - self.will_close = self._check_close() - - # do we have a Content-Length? - # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked" - length = self.msg.getheader('content-length') - if length and not self.chunked: - try: - self.length = int(length) - except ValueError: - self.length = None - else: - if self.length < 0: # ignore nonsensical negative lengths - self.length = None - else: - self.length = None - - # does the body have a fixed length? 
(of zero) - if (status == NO_CONTENT or status == NOT_MODIFIED or - 100 <= status < 200 or # 1xx codes - self._method == 'HEAD'): - self.length = 0 - - # if the connection remains open, and we aren't using chunked, and - # a content-length was not provided, then assume that the connection - # WILL close. - if not self.will_close and \ - not self.chunked and \ - self.length is None: - self.will_close = 1 - - def _check_close(self): - conn = self.msg.getheader('connection') - if self.version == 11: - # An HTTP/1.1 proxy is assumed to stay open unless - # explicitly closed. - conn = self.msg.getheader('connection') - if conn and "close" in conn.lower(): - return True - return False - - # Some HTTP/1.0 implementations have support for persistent - # connections, using rules different than HTTP/1.1. - - # For older HTTP, Keep-Alive indicates persistent connection. - if self.msg.getheader('keep-alive'): - return False - - # At least Akamai returns a "Connection: Keep-Alive" header, - # which was supposed to be sent by the client. - if conn and "keep-alive" in conn.lower(): - return False - - # Proxy-Connection is a netscape hack. - pconn = self.msg.getheader('proxy-connection') - if pconn and "keep-alive" in pconn.lower(): - return False - - # otherwise, assume it will close - return True - - def close(self): - if self.fp: - self.fp.close() - self.fp = None - - def isclosed(self): - # NOTE: it is possible that we will not ever call self.close(). This - # case occurs when will_close is TRUE, length is None, and we - # read up to the last byte, but NOT past it. - # - # IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be - # called, meaning self.isclosed() is meaningful. - return self.fp is None - - # XXX It would be nice to have readline and __iter__ for this, too. 
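Condensed into one illustrative helper (not part of httplib's API), the fixed-zero-length rule that begin() applies above:

    def expect_zero_length(status, method):
        # 1xx informational, 204 No Content and 304 Not Modified replies,
        # and any response to a HEAD request, never carry a message body
        return (status in (204, 304)
                or 100 <= status < 200
                or method == 'HEAD')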
- - def read(self, amt=None): - if self.fp is None: - return '' - - if self.chunked: - return self._read_chunked(amt) - - if amt is None: - # unbounded read - if self.length is None: - s = self.fp.read() - else: - s = self._safe_read(self.length) - self.length = 0 - self.close() # we read everything - return s - - if self.length is not None: - if amt > self.length: - # clip the read to the "end of response" - amt = self.length - - # we do not use _safe_read() here because this may be a .will_close - # connection, and the user is reading more bytes than will be provided - # (for example, reading in 1k chunks) - s = self.fp.read(amt) - if self.length is not None: - self.length -= len(s) - if not self.length: - self.close() - return s - - def _read_chunked(self, amt): - assert self.chunked != _UNKNOWN - chunk_left = self.chunk_left - value = [] - while True: - if chunk_left is None: - line = self.fp.readline() - i = line.find(';') - if i >= 0: - line = line[:i] # strip chunk-extensions - try: - chunk_left = int(line, 16) - except ValueError: - # close the connection as protocol synchronisation is - # probably lost - self.close() - raise IncompleteRead(''.join(value)) - if chunk_left == 0: - break - if amt is None: - value.append(self._safe_read(chunk_left)) - elif amt < chunk_left: - value.append(self._safe_read(amt)) - self.chunk_left = chunk_left - amt - return ''.join(value) - elif amt == chunk_left: - value.append(self._safe_read(amt)) - self._safe_read(2) # toss the CRLF at the end of the chunk - self.chunk_left = None - return ''.join(value) - else: - value.append(self._safe_read(chunk_left)) - amt -= chunk_left - - # we read the whole chunk, get another - self._safe_read(2) # toss the CRLF at the end of the chunk - chunk_left = None - - # read and discard trailer up to the CRLF terminator - ### note: we shouldn't have any trailers! - while True: - line = self.fp.readline() - if not line: - # a vanishingly small number of sites EOF without - # sending the trailer - break - if line == '\r\n': - break - - # we read everything; close the "file" - self.close() - - return ''.join(value) - - def _safe_read(self, amt): - """Read the number of bytes requested, compensating for partial reads. - - Normally, we have a blocking socket, but a read() can be interrupted - by a signal (resulting in a partial read). - - Note that we cannot distinguish between EOF and an interrupt when zero - bytes have been read. IncompleteRead() will be raised in this - situation. - - This function should be used when bytes "should" be present for - reading. If the bytes are truly not available (due to EOF), then the - IncompleteRead exception can be used to detect the problem. 
- """ - s = [] - while amt > 0: - chunk = self.fp.read(min(amt, MAXAMOUNT)) - if not chunk: - raise IncompleteRead(''.join(s), amt) - s.append(chunk) - amt -= len(chunk) - return ''.join(s) - - def getheader(self, name, default=None): - if self.msg is None: - raise ResponseNotReady() - return self.msg.getheader(name, default) - - def getheaders(self): - """Return list of (header, value) tuples.""" - if self.msg is None: - raise ResponseNotReady() - return self.msg.items() - - -class HTTPConnection: - - _http_vsn = 11 - _http_vsn_str = 'HTTP/1.1' - - response_class = HTTPResponse - default_port = HTTP_PORT - auto_open = 1 - debuglevel = 0 - strict = 0 - - def __init__(self, host, port=None, strict=None, - timeout=socket._GLOBAL_DEFAULT_TIMEOUT): - self.timeout = timeout - self.sock = None - self._buffer = [] - self.__response = None - self.__state = _CS_IDLE - self._method = None - self._tunnel_host = None - self._tunnel_port = None - - self._set_hostport(host, port) - if strict is not None: - self.strict = strict - - def _set_tunnel(self, host, port=None): - """ Sets up the host and the port for the HTTP CONNECT Tunnelling.""" - self._tunnel_host = host - self._tunnel_port = port - - def _set_hostport(self, host, port): - if port is None: - i = host.rfind(':') - j = host.rfind(']') # ipv6 addresses have [...] - if i > j: - try: - port = int(host[i+1:]) - except ValueError: - raise InvalidURL("nonnumeric port: '%s'" % host[i+1:]) - host = host[:i] - else: - port = self.default_port - if host and host[0] == '[' and host[-1] == ']': - host = host[1:-1] - self.host = host - self.port = port - - def set_debuglevel(self, level): - self.debuglevel = level - - def _tunnel(self): - self._set_hostport(self._tunnel_host, self._tunnel_port) - self.send("CONNECT %s:%d HTTP/1.0\r\n\r\n" % (self.host, self.port)) - response = self.response_class(self.sock, strict = self.strict, - method = self._method) - (version, code, message) = response._read_status() - - if code != 200: - self.close() - raise socket.error, "Tunnel connection failed: %d %s" % (code, - message.strip()) - while True: - line = response.fp.readline() - if line == '\r\n': break - - - def connect(self): - """Connect to the host and port specified in __init__.""" - self.sock = socket.create_connection((self.host,self.port), - self.timeout) - - if self._tunnel_host: - self._tunnel() - - def close(self): - """Close the connection to the HTTP server.""" - if self.sock: - self.sock.close() # close it manually... there may be other refs - self.sock = None - if self.__response: - self.__response.close() - self.__response = None - self.__state = _CS_IDLE - - def send(self, str): - """Send `str' to the server.""" - if self.sock is None: - if self.auto_open: - self.connect() - else: - raise NotConnected() - - # send the data to the server. if we get a broken pipe, then close - # the socket. we want to reconnect when somebody tries to send again. - # - # NOTE: we DO propagate the error, though, because we cannot simply - # ignore the error... the caller will know if they can retry. - if self.debuglevel > 0: - print "send:", repr(str) - try: - blocksize=8192 - if hasattr(str,'read') and not isinstance(str, array): - if self.debuglevel > 0: print "sendIng a read()able" - data=str.read(blocksize) - while data: - self.sock.sendall(data) - data=str.read(blocksize) - else: - self.sock.sendall(str) - except socket.error, v: - if v[0] == 32: # Broken pipe - self.close() - raise - - def _output(self, s): - """Add a line of output to the current request buffer. 
- - Assumes that the line does *not* end with \\r\\n. - """ - self._buffer.append(s) - - def _send_output(self): - """Send the currently buffered request and clear the buffer. - - Appends an extra \\r\\n to the buffer. - """ - self._buffer.extend(("", "")) - msg = "\r\n".join(self._buffer) - del self._buffer[:] - self.send(msg) - - def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0): - """Send a request to the server. - - `method' specifies an HTTP request method, e.g. 'GET'. - `url' specifies the object being requested, e.g. '/index.html'. - `skip_host' if True does not add automatically a 'Host:' header - `skip_accept_encoding' if True does not add automatically an - 'Accept-Encoding:' header - """ - - # if a prior response has been completed, then forget about it. - if self.__response and self.__response.isclosed(): - self.__response = None - - - # in certain cases, we cannot issue another request on this connection. - # this occurs when: - # 1) we are in the process of sending a request. (_CS_REQ_STARTED) - # 2) a response to a previous request has signalled that it is going - # to close the connection upon completion. - # 3) the headers for the previous response have not been read, thus - # we cannot determine whether point (2) is true. (_CS_REQ_SENT) - # - # if there is no prior response, then we can request at will. - # - # if point (2) is true, then we will have passed the socket to the - # response (effectively meaning, "there is no prior response"), and - # will open a new one when a new request is made. - # - # Note: if a prior response exists, then we *can* start a new request. - # We are not allowed to begin fetching the response to this new - # request, however, until that prior response is complete. - # - if self.__state == _CS_IDLE: - self.__state = _CS_REQ_STARTED - else: - raise CannotSendRequest() - - # Save the method we use, we need it later in the response phase - self._method = method - if not url: - url = '/' - str = '%s %s %s' % (method, url, self._http_vsn_str) - - self._output(str) - - if self._http_vsn == 11: - # Issue some standard headers for better HTTP/1.1 compliance - - if not skip_host: - # this header is issued *only* for HTTP/1.1 - # connections. more specifically, this means it is - # only issued when the client uses the new - # HTTPConnection() class. backwards-compat clients - # will be using HTTP/1.0 and those clients may be - # issuing this header themselves. we should NOT issue - # it twice; some web servers (such as Apache) barf - # when they see two Host: headers - - # If we need a non-standard port,include it in the - # header. If the request is going through a proxy, - # but the host of the actual URL, not the host of the - # proxy. - - netloc = '' - if url.startswith('http'): - nil, netloc, nil, nil, nil = urlsplit(url) - - if netloc: - try: - netloc_enc = netloc.encode("ascii") - except UnicodeEncodeError: - netloc_enc = netloc.encode("idna") - self.putheader('Host', netloc_enc) - else: - try: - host_enc = self.host.encode("ascii") - except UnicodeEncodeError: - host_enc = self.host.encode("idna") - if self.port == self.default_port: - self.putheader('Host', host_enc) - else: - self.putheader('Host', "%s:%s" % (host_enc, self.port)) - - # note: we are assuming that clients will not attempt to set these - # headers since *this* library must deal with the - # consequences. this also means that when the supporting - # libraries are updated to recognize other forms, then this - # code should be changed (removed or updated). 
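The Host-header construction above, reduced to a sketch (the helper name and default are illustrative):

    def host_header(host, port, default_port=80):
        try:
            host_enc = host.encode('ascii')
        except UnicodeEncodeError:
            host_enc = host.encode('idna')   # non-ASCII hostnames use IDNA
        if port == default_port:
            return host_enc                  # the default port is left implicit
        return '%s:%s' % (host_enc, port)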
- - # we only want a Content-Encoding of "identity" since we don't - # support encodings such as x-gzip or x-deflate. - if not skip_accept_encoding: - self.putheader('Accept-Encoding', 'identity') - - # we can accept "chunked" Transfer-Encodings, but no others - # NOTE: no TE header implies *only* "chunked" - #self.putheader('TE', 'chunked') - - # if TE is supplied in the header, then it must appear in a - # Connection header. - #self.putheader('Connection', 'TE') - - else: - # For HTTP/1.0, the server will assume "not chunked" - pass - - def putheader(self, header, value): - """Send a request header line to the server. - - For example: h.putheader('Accept', 'text/html') - """ - if self.__state != _CS_REQ_STARTED: - raise CannotSendHeader() - - str = '%s: %s' % (header, value) - self._output(str) - - def endheaders(self): - """Indicate that the last header line has been sent to the server.""" - - if self.__state == _CS_REQ_STARTED: - self.__state = _CS_REQ_SENT - else: - raise CannotSendHeader() - - self._send_output() - - def request(self, method, url, body=None, headers={}): - """Send a complete request to the server.""" - - try: - self._send_request(method, url, body, headers) - except socket.error, v: - # trap 'Broken pipe' if we're allowed to automatically reconnect - if v[0] != 32 or not self.auto_open: - raise - # try one more time - self._send_request(method, url, body, headers) - - def _send_request(self, method, url, body, headers): - # honour explicitly requested Host: and Accept-Encoding headers - header_names = dict.fromkeys([k.lower() for k in headers]) - skips = {} - if 'host' in header_names: - skips['skip_host'] = 1 - if 'accept-encoding' in header_names: - skips['skip_accept_encoding'] = 1 - - self.putrequest(method, url, **skips) - - if body and ('content-length' not in header_names): - thelen=None - try: - thelen=str(len(body)) - except TypeError, te: - # If this is a file-like object, try to - # fstat its file descriptor - import os - try: - thelen = str(os.fstat(body.fileno()).st_size) - except (AttributeError, OSError): - # Don't send a length if this failed - if self.debuglevel > 0: print "Cannot stat!!" - - if thelen is not None: - self.putheader('Content-Length',thelen) - for hdr, value in headers.iteritems(): - self.putheader(hdr, value) - self.endheaders() - - if body: - self.send(body) - - def getresponse(self): - "Get the response from the server." - - # if a prior response has been completed, then forget about it. - if self.__response and self.__response.isclosed(): - self.__response = None - - # - # if a prior response exists, then it must be completed (otherwise, we - # cannot read this response's header to determine the connection-close - # behavior) - # - # note: if a prior response existed, but was connection-close, then the - # socket and response were made independent of this HTTPConnection - # object since a new request requires that we open a whole new - # connection - # - # this means the prior response had one of two states: - # 1) will_close: this connection was reset and the prior socket and - # response operate independently - # 2) persistent: the response was retained and we await its - # isclosed() status to become true. 
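In the common case, the request()/_send_request() pair above drives this whole cycle and derives Content-Length automatically; a usage sketch (host and path are placeholders):

    conn = HTTPConnection('www.example.com')
    conn.request('POST', '/submit', body='payload=1')  # Content-Length: 9 via len()
    resp = conn.getresponse()
    print resp.status, resp.reason
    conn.close()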
- # - if self.__state != _CS_REQ_SENT or self.__response: - raise ResponseNotReady() - - if self.debuglevel > 0: - response = self.response_class(self.sock, self.debuglevel, - strict=self.strict, - method=self._method) - else: - response = self.response_class(self.sock, strict=self.strict, - method=self._method) - - response.begin() - assert response.will_close != _UNKNOWN - self.__state = _CS_IDLE - - if response.will_close: - # this effectively passes the connection to the response - self.close() - else: - # remember this, so we can tell when it is complete - self.__response = response - - return response - - -class HTTP: - "Compatibility class with httplib.py from 1.5." - - _http_vsn = 10 - _http_vsn_str = 'HTTP/1.0' - - debuglevel = 0 - - _connection_class = HTTPConnection - - def __init__(self, host='', port=None, strict=None): - "Provide a default host, since the superclass requires one." - - # some joker passed 0 explicitly, meaning default port - if port == 0: - port = None - - # Note that we may pass an empty string as the host; this will throw - # an error when we attempt to connect. Presumably, the client code - # will call connect before then, with a proper host. - self._setup(self._connection_class(host, port, strict)) - - def _setup(self, conn): - self._conn = conn - - # set up delegation to flesh out interface - self.send = conn.send - self.putrequest = conn.putrequest - self.endheaders = conn.endheaders - self.set_debuglevel = conn.set_debuglevel - - conn._http_vsn = self._http_vsn - conn._http_vsn_str = self._http_vsn_str - - self.file = None - - def connect(self, host=None, port=None): - "Accept arguments to set the host/port, since the superclass doesn't." - - if host is not None: - self._conn._set_hostport(host, port) - self._conn.connect() - - def getfile(self): - "Provide a getfile, since the superclass' does not use this concept." - return self.file - - def putheader(self, header, *values): - "The superclass allows only one value argument." - self._conn.putheader(header, '\r\n\t'.join(values)) - - def getreply(self): - """Compat definition since superclass does not define it. - - Returns a tuple consisting of: - - server status code (e.g. '200' if all goes well) - - server "reason" corresponding to status code - - any RFC822 headers in the response from the server - """ - try: - response = self._conn.getresponse() - except BadStatusLine, e: - ### hmm. if getresponse() ever closes the socket on a bad request, - ### then we are going to have problems with self.sock - - ### should we keep this behavior? do people use it? - # keep the socket open (as a file), and return it - self.file = self._conn.sock.makefile('rb', 0) - - # close our socket -- we want to restart after any protocol error - self.close() - - self.headers = None - return -1, e.line, None - - self.headers = response.msg - self.file = response.fp - return response.status, response.reason, response.msg - - def close(self): - self._conn.close() - - # note that self.file == response.fp, which gets closed by the - # superclass. just clear the object ref here. - ### hmm. messy. if status==-1, then self.file is owned by us. - ### well... we aren't explicitly closing, but losing this ref will - ### do it - self.file = None - -try: - import ssl -except ImportError: - pass -else: - class HTTPSConnection(HTTPConnection): - "This class allows communication via SSL." 
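Usage mirrors plain HTTPConnection; a sketch (placeholder host; key_file/cert_file are optional client-certificate paths handed to ssl.wrap_socket() in connect() below):

    conn = HTTPSConnection('www.example.com')
    conn.request('GET', '/')
    print conn.getresponse().status
    conn.close()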
- - default_port = HTTPS_PORT - - def __init__(self, host, port=None, key_file=None, cert_file=None, - strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT): - HTTPConnection.__init__(self, host, port, strict, timeout) - self.key_file = key_file - self.cert_file = cert_file - - def connect(self): - "Connect to a host on a given (SSL) port." - - sock = socket.create_connection((self.host, self.port), self.timeout) - if self._tunnel_host: - self.sock = sock - self._tunnel() - self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file) - - __all__.append("HTTPSConnection") - - class HTTPS(HTTP): - """Compatibility with 1.5 httplib interface - - Python 1.5.2 did not have an HTTPS class, but it defined an - interface for sending http requests that is also useful for - https. - """ - - _connection_class = HTTPSConnection - - def __init__(self, host='', port=None, key_file=None, cert_file=None, - strict=None): - # provide a default host, pass the X509 cert info - - # urf. compensate for bad input. - if port == 0: - port = None - self._setup(self._connection_class(host, port, key_file, - cert_file, strict)) - - # we never actually use these for anything, but we keep them - # here for compatibility with post-1.5.2 CVS. - self.key_file = key_file - self.cert_file = cert_file - - - def FakeSocket (sock, sslobj): - warnings.warn("FakeSocket is deprecated, and won't be in 3.x. " + - "Use the result of ssl.wrap_socket() directly instead.", - DeprecationWarning, stacklevel=2) - return sslobj - - -class HTTPException(Exception): - # Subclasses that define an __init__ must call Exception.__init__ - # or define self.args. Otherwise, str() will fail. - pass - -class NotConnected(HTTPException): - pass - -class InvalidURL(HTTPException): - pass - -class UnknownProtocol(HTTPException): - def __init__(self, version): - self.args = version, - self.version = version - -class UnknownTransferEncoding(HTTPException): - pass - -class UnimplementedFileMode(HTTPException): - pass - -class IncompleteRead(HTTPException): - def __init__(self, partial, expected=None): - self.args = partial, - self.partial = partial - self.expected = expected - def __repr__(self): - if self.expected is not None: - e = ', %i more expected' % self.expected - else: - e = '' - return 'IncompleteRead(%i bytes read%s)' % (len(self.partial), e) - def __str__(self): - return repr(self) - -class ImproperConnectionState(HTTPException): - pass - -class CannotSendRequest(ImproperConnectionState): - pass - -class CannotSendHeader(ImproperConnectionState): - pass - -class ResponseNotReady(ImproperConnectionState): - pass - -class BadStatusLine(HTTPException): - def __init__(self, line): - self.args = line, - self.line = line - -# for backwards compatibility -error = HTTPException - -class LineAndFileWrapper: - """A limited file-like object for HTTP/0.9 responses.""" - - # The status-line parsing code calls readline(), which normally - # get the HTTP status line. For a 0.9 response, however, this is - # actually the first line of the body! Clients need to get a - # readable file object that contains that line. - - def __init__(self, line, file): - self._line = line - self._file = file - self._line_consumed = 0 - self._line_offset = 0 - self._line_left = len(line) - - def __getattr__(self, attr): - return getattr(self._file, attr) - - def _done(self): - # called when the last byte is read from the line. After the - # call, all read methods are delegated to the underlying file - # object. 
- self._line_consumed = 1 - self.read = self._file.read - self.readline = self._file.readline - self.readlines = self._file.readlines - - def read(self, amt=None): - if self._line_consumed: - return self._file.read(amt) - assert self._line_left - if amt is None or amt > self._line_left: - s = self._line[self._line_offset:] - self._done() - if amt is None: - return s + self._file.read() - else: - return s + self._file.read(amt - len(s)) - else: - assert amt <= self._line_left - i = self._line_offset - j = i + amt - s = self._line[i:j] - self._line_offset = j - self._line_left -= amt - if self._line_left == 0: - self._done() - return s - - def readline(self): - if self._line_consumed: - return self._file.readline() - assert self._line_left - s = self._line[self._line_offset:] - self._done() - return s - - def readlines(self, size=None): - if self._line_consumed: - return self._file.readlines(size) - assert self._line_left - L = [self._line[self._line_offset:]] - self._done() - if size is None: - return L + self._file.readlines() - else: - return L + self._file.readlines(size) - -def test(): - """Test this module. - - A hodge podge of tests collected here, because they have too many - external dependencies for the regular test suite. - """ - - import sys - import getopt - opts, args = getopt.getopt(sys.argv[1:], 'd') - dl = 0 - for o, a in opts: - if o == '-d': dl = dl + 1 - host = 'www.python.org' - selector = '/' - if args[0:]: host = args[0] - if args[1:]: selector = args[1] - h = HTTP() - h.set_debuglevel(dl) - h.connect(host) - h.putrequest('GET', selector) - h.endheaders() - status, reason, headers = h.getreply() - print 'status =', status - print 'reason =', reason - print "read", len(h.getfile().read()) - print - if headers: - for header in headers.headers: print header.strip() - print - - # minimal test that code to extract host from url works - class HTTP11(HTTP): - _http_vsn = 11 - _http_vsn_str = 'HTTP/1.1' - - h = HTTP11('www.python.org') - h.putrequest('GET', 'http://www.python.org/~jeremy/') - h.endheaders() - h.getreply() - h.close() - - try: - import ssl - except ImportError: - pass - else: - - for host, selector in (('sourceforge.net', '/projects/python'), - ): - print "https://%s%s" % (host, selector) - hs = HTTPS() - hs.set_debuglevel(dl) - hs.connect(host) - hs.putrequest('GET', selector) - hs.endheaders() - status, reason, headers = hs.getreply() - print 'status =', status - print 'reason =', reason - print "read", len(hs.getfile().read()) - print - if headers: - for header in headers.headers: print header.strip() - print - -if __name__ == '__main__': - test() diff --git a/icarus-miner/data/usr/lib/python2.6/httplib.pyc b/icarus-miner/data/usr/lib/python2.6/httplib.pyc deleted file mode 100644 index 7fb6975..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/httplib.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/json/__init__.py b/icarus-miner/data/usr/lib/python2.6/json/__init__.py deleted file mode 100644 index 56116f4..0000000 --- a/icarus-miner/data/usr/lib/python2.6/json/__init__.py +++ /dev/null @@ -1,318 +0,0 @@ -r"""A simple, fast, extensible JSON encoder and decoder - -JSON (JavaScript Object Notation) is a subset of -JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data -interchange format. - -json exposes an API familiar to uses of the standard library -marshal and pickle modules. 
-
-Encoding basic Python object hierarchies::
-
-    >>> import json
-    >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
-    '["foo", {"bar": ["baz", null, 1.0, 2]}]'
-    >>> print json.dumps("\"foo\bar")
-    "\"foo\bar"
-    >>> print json.dumps(u'\u1234')
-    "\u1234"
-    >>> print json.dumps('\\')
-    "\\"
-    >>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
-    {"a": 0, "b": 0, "c": 0}
-    >>> from StringIO import StringIO
-    >>> io = StringIO()
-    >>> json.dump(['streaming API'], io)
-    >>> io.getvalue()
-    '["streaming API"]'
-
-Compact encoding::
-
-    >>> import json
-    >>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
-    '[1,2,3,{"4":5,"6":7}]'
-
-Pretty printing (using repr() because of extraneous whitespace in the output)::
-
-    >>> import json
-    >>> print repr(json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4))
-    '{\n    "4": 5, \n    "6": 7\n}'
-
-Decoding JSON::
-
-    >>> import json
-    >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]')
-    [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
-    >>> json.loads('"\\"foo\\bar"')
-    u'"foo\x08ar'
-    >>> from StringIO import StringIO
-    >>> io = StringIO('["streaming API"]')
-    >>> json.load(io)
-    [u'streaming API']
-
-Specializing JSON object decoding::
-
-    >>> import json
-    >>> def as_complex(dct):
-    ...     if '__complex__' in dct:
-    ...         return complex(dct['real'], dct['imag'])
-    ...     return dct
-    ...
-    >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
-    ...     object_hook=as_complex)
-    (1+2j)
-    >>> import decimal
-    >>> json.loads('1.1', parse_float=decimal.Decimal)
-    Decimal('1.1')
-
-Extending JSONEncoder::
-
-    >>> import json
-    >>> class ComplexEncoder(json.JSONEncoder):
-    ...     def default(self, obj):
-    ...         if isinstance(obj, complex):
-    ...             return [obj.real, obj.imag]
-    ...         return json.JSONEncoder.default(self, obj)
-    ...
-    >>> json.dumps(2 + 1j, cls=ComplexEncoder)
-    '[2.0, 1.0]'
-    >>> ComplexEncoder().encode(2 + 1j)
-    '[2.0, 1.0]'
-    >>> list(ComplexEncoder().iterencode(2 + 1j))
-    ['[', '2.0', ', ', '1.0', ']']
-
-
-Using json.tool from the shell to validate and
-pretty-print::
-
-    $ echo '{"json":"obj"}' | python -mjson.tool
-    {
-        "json": "obj"
-    }
-    $ echo '{ 1.2:3.4}' | python -mjson.tool
-    Expecting property name: line 1 column 2 (char 2)
-
-Note that the JSON produced by this module's default settings
-is a subset of YAML, so it may be used as a serializer for that as well.
-
-"""
-
-__version__ = '1.9'
-__all__ = [
-    'dump', 'dumps', 'load', 'loads',
-    'JSONDecoder', 'JSONEncoder',
-]
-
-__author__ = 'Bob Ippolito <bob@redivi.com>'
-
-from .decoder import JSONDecoder
-from .encoder import JSONEncoder
-
-_default_encoder = JSONEncoder(
-    skipkeys=False,
-    ensure_ascii=True,
-    check_circular=True,
-    allow_nan=True,
-    indent=None,
-    separators=None,
-    encoding='utf-8',
-    default=None,
-)
-
-def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
-        allow_nan=True, cls=None, indent=None, separators=None,
-        encoding='utf-8', default=None, **kw):
-    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
-    ``.write()``-supporting file-like object).
-
-    If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
-    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
-    will be skipped instead of raising a ``TypeError``.
-
-    If ``ensure_ascii`` is ``False``, then some of the chunks written to ``fp``
-    may be ``unicode`` instances, subject to normal Python ``str`` to
-    ``unicode`` coercion rules.
Unless ``fp.write()`` explicitly - understands ``unicode`` (as in ``codecs.getwriter()``) this is likely - to cause an error. - - If ``check_circular`` is ``False``, then the circular reference check - for container types will be skipped and a circular reference will - result in an ``OverflowError`` (or worse). - - If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to - serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) - in strict compliance of the JSON specification, instead of using the - JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). - - If ``indent`` is a non-negative integer, then JSON array elements and object - members will be pretty-printed with that indent level. An indent level - of 0 will only insert newlines. ``None`` is the most compact representation. - - If ``separators`` is an ``(item_separator, dict_separator)`` tuple - then it will be used instead of the default ``(', ', ': ')`` separators. - ``(',', ':')`` is the most compact JSON representation. - - ``encoding`` is the character encoding for str instances, default is UTF-8. - - ``default(obj)`` is a function that should return a serializable version - of obj or raise TypeError. The default simply raises TypeError. - - To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the - ``.default()`` method to serialize additional types), specify it with - the ``cls`` kwarg. - - """ - # cached encoder - if (skipkeys is False and ensure_ascii is True and - check_circular is True and allow_nan is True and - cls is None and indent is None and separators is None and - encoding == 'utf-8' and default is None and not kw): - iterable = _default_encoder.iterencode(obj) - else: - if cls is None: - cls = JSONEncoder - iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii, - check_circular=check_circular, allow_nan=allow_nan, indent=indent, - separators=separators, encoding=encoding, - default=default, **kw).iterencode(obj) - # could accelerate with writelines in some versions of Python, at - # a debuggability cost - for chunk in iterable: - fp.write(chunk) - - -def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True, - allow_nan=True, cls=None, indent=None, separators=None, - encoding='utf-8', default=None, **kw): - """Serialize ``obj`` to a JSON formatted ``str``. - - If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types - (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) - will be skipped instead of raising a ``TypeError``. - - If ``ensure_ascii`` is ``False``, then the return value will be a - ``unicode`` instance subject to normal Python ``str`` to ``unicode`` - coercion rules instead of being escaped to an ASCII ``str``. - - If ``check_circular`` is ``False``, then the circular reference check - for container types will be skipped and a circular reference will - result in an ``OverflowError`` (or worse). - - If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to - serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in - strict compliance of the JSON specification, instead of using the - JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). - - If ``indent`` is a non-negative integer, then JSON array elements and - object members will be pretty-printed with that indent level. An indent - level of 0 will only insert newlines. ``None`` is the most compact - representation. 
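    For instance (a sketch)::

        >>> import json
        >>> json.dumps({'4': 5, '6': 7}, separators=(',', ':'))
        '{"4":5,"6":7}'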
- - If ``separators`` is an ``(item_separator, dict_separator)`` tuple - then it will be used instead of the default ``(', ', ': ')`` separators. - ``(',', ':')`` is the most compact JSON representation. - - ``encoding`` is the character encoding for str instances, default is UTF-8. - - ``default(obj)`` is a function that should return a serializable version - of obj or raise TypeError. The default simply raises TypeError. - - To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the - ``.default()`` method to serialize additional types), specify it with - the ``cls`` kwarg. - - """ - # cached encoder - if (skipkeys is False and ensure_ascii is True and - check_circular is True and allow_nan is True and - cls is None and indent is None and separators is None and - encoding == 'utf-8' and default is None and not kw): - return _default_encoder.encode(obj) - if cls is None: - cls = JSONEncoder - return cls( - skipkeys=skipkeys, ensure_ascii=ensure_ascii, - check_circular=check_circular, allow_nan=allow_nan, indent=indent, - separators=separators, encoding=encoding, default=default, - **kw).encode(obj) - - -_default_decoder = JSONDecoder(encoding=None, object_hook=None) - - -def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, - parse_int=None, parse_constant=None, **kw): - """Deserialize ``fp`` (a ``.read()``-supporting file-like object - containing a JSON document) to a Python object. - - If the contents of ``fp`` is encoded with an ASCII based encoding other - than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must - be specified. Encodings that are not ASCII based (such as UCS-2) are - not allowed, and should be wrapped with - ``codecs.getreader(fp)(encoding)``, or simply decoded to a ``unicode`` - object and passed to ``loads()`` - - ``object_hook`` is an optional function that will be called with the - result of any object literal decode (a ``dict``). The return value of - ``object_hook`` will be used instead of the ``dict``. This feature - can be used to implement custom decoders (e.g. JSON-RPC class hinting). - - To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` - kwarg. - - """ - return loads(fp.read(), - encoding=encoding, cls=cls, object_hook=object_hook, - parse_float=parse_float, parse_int=parse_int, - parse_constant=parse_constant, **kw) - - -def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None, - parse_int=None, parse_constant=None, **kw): - """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON - document) to a Python object. - - If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding - other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name - must be specified. Encodings that are not ASCII based (such as UCS-2) - are not allowed and should be decoded to ``unicode`` first. - - ``object_hook`` is an optional function that will be called with the - result of any object literal decode (a ``dict``). The return value of - ``object_hook`` will be used instead of the ``dict``. This feature - can be used to implement custom decoders (e.g. JSON-RPC class hinting). - - ``parse_float``, if specified, will be called with the string - of every JSON float to be decoded. By default this is equivalent to - float(num_str). This can be used to use another datatype or parser - for JSON floats (e.g. decimal.Decimal). - - ``parse_int``, if specified, will be called with the string - of every JSON int to be decoded. 
By default this is equivalent to - int(num_str). This can be used to use another datatype or parser - for JSON integers (e.g. float). - - ``parse_constant``, if specified, will be called with one of the - following strings: -Infinity, Infinity, NaN, null, true, false. - This can be used to raise an exception if invalid JSON numbers - are encountered. - - To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` - kwarg. - - """ - if (cls is None and encoding is None and object_hook is None and - parse_int is None and parse_float is None and - parse_constant is None and not kw): - return _default_decoder.decode(s) - if cls is None: - cls = JSONDecoder - if object_hook is not None: - kw['object_hook'] = object_hook - if parse_float is not None: - kw['parse_float'] = parse_float - if parse_int is not None: - kw['parse_int'] = parse_int - if parse_constant is not None: - kw['parse_constant'] = parse_constant - return cls(encoding=encoding, **kw).decode(s) diff --git a/icarus-miner/data/usr/lib/python2.6/json/__init__.pyc b/icarus-miner/data/usr/lib/python2.6/json/__init__.pyc deleted file mode 100644 index 391da7b..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/json/__init__.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/json/decoder.py b/icarus-miner/data/usr/lib/python2.6/json/decoder.py deleted file mode 100644 index e80b935..0000000 --- a/icarus-miner/data/usr/lib/python2.6/json/decoder.py +++ /dev/null @@ -1,339 +0,0 @@ -"""Implementation of JSONDecoder -""" - -import re -import sys - -from json.scanner import Scanner, pattern -try: - from _json import scanstring as c_scanstring -except ImportError: - c_scanstring = None - -__all__ = ['JSONDecoder'] - -FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL - -NaN, PosInf, NegInf = float('nan'), float('inf'), float('-inf') - - -def linecol(doc, pos): - lineno = doc.count('\n', 0, pos) + 1 - if lineno == 1: - colno = pos - else: - colno = pos - doc.rindex('\n', 0, pos) - return lineno, colno - - -def errmsg(msg, doc, pos, end=None): - lineno, colno = linecol(doc, pos) - if end is None: - fmt = '{0}: line {1} column {2} (char {3})' - return fmt.format(msg, lineno, colno, pos) - endlineno, endcolno = linecol(doc, end) - fmt = '{0}: line {1} column {2} - line {3} column {4} (char {5} - {6})' - return fmt.format(msg, lineno, colno, endlineno, endcolno, pos, end) - - -_CONSTANTS = { - '-Infinity': NegInf, - 'Infinity': PosInf, - 'NaN': NaN, - 'true': True, - 'false': False, - 'null': None, -} - - -def JSONConstant(match, context, c=_CONSTANTS): - s = match.group(0) - fn = getattr(context, 'parse_constant', None) - if fn is None: - rval = c[s] - else: - rval = fn(s) - return rval, None -pattern('(-?Infinity|NaN|true|false|null)')(JSONConstant) - - -def JSONNumber(match, context): - match = JSONNumber.regex.match(match.string, *match.span()) - integer, frac, exp = match.groups() - if frac or exp: - fn = getattr(context, 'parse_float', None) or float - res = fn(integer + (frac or '') + (exp or '')) - else: - fn = getattr(context, 'parse_int', None) or int - res = fn(integer) - return res, None -pattern(r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?')(JSONNumber) - - -STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS) -BACKSLASH = { - '"': u'"', '\\': u'\\', '/': u'/', - 'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t', -} - -DEFAULT_ENCODING = "utf-8" - - -def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match): - if encoding is None: - encoding = 
DEFAULT_ENCODING - chunks = [] - _append = chunks.append - begin = end - 1 - while 1: - chunk = _m(s, end) - if chunk is None: - raise ValueError( - errmsg("Unterminated string starting at", s, begin)) - end = chunk.end() - content, terminator = chunk.groups() - if content: - if not isinstance(content, unicode): - content = unicode(content, encoding) - _append(content) - if terminator == '"': - break - elif terminator != '\\': - if strict: - msg = "Invalid control character {0!r} at".format(terminator) - raise ValueError(errmsg(msg, s, end)) - else: - _append(terminator) - continue - try: - esc = s[end] - except IndexError: - raise ValueError( - errmsg("Unterminated string starting at", s, begin)) - if esc != 'u': - try: - m = _b[esc] - except KeyError: - msg = "Invalid \\escape: {0!r}".format(esc) - raise ValueError(errmsg(msg, s, end)) - end += 1 - else: - esc = s[end + 1:end + 5] - next_end = end + 5 - msg = "Invalid \\uXXXX escape" - try: - if len(esc) != 4: - raise ValueError - uni = int(esc, 16) - if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535: - msg = "Invalid \\uXXXX\\uXXXX surrogate pair" - if not s[end + 5:end + 7] == '\\u': - raise ValueError - esc2 = s[end + 7:end + 11] - if len(esc2) != 4: - raise ValueError - uni2 = int(esc2, 16) - uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00)) - next_end += 6 - m = unichr(uni) - except ValueError: - raise ValueError(errmsg(msg, s, end)) - end = next_end - _append(m) - return u''.join(chunks), end - - -# Use speedup -if c_scanstring is not None: - scanstring = c_scanstring -else: - scanstring = py_scanstring - -def JSONString(match, context): - encoding = getattr(context, 'encoding', None) - strict = getattr(context, 'strict', True) - return scanstring(match.string, match.end(), encoding, strict) -pattern(r'"')(JSONString) - - -WHITESPACE = re.compile(r'\s*', FLAGS) - - -def JSONObject(match, context, _w=WHITESPACE.match): - pairs = {} - s = match.string - end = _w(s, match.end()).end() - nextchar = s[end:end + 1] - # Trivial empty object - if nextchar == '}': - return pairs, end + 1 - if nextchar != '"': - raise ValueError(errmsg("Expecting property name", s, end)) - end += 1 - encoding = getattr(context, 'encoding', None) - strict = getattr(context, 'strict', True) - iterscan = JSONScanner.iterscan - while True: - key, end = scanstring(s, end, encoding, strict) - end = _w(s, end).end() - if s[end:end + 1] != ':': - raise ValueError(errmsg("Expecting : delimiter", s, end)) - end = _w(s, end + 1).end() - try: - value, end = iterscan(s, idx=end, context=context).next() - except StopIteration: - raise ValueError(errmsg("Expecting object", s, end)) - pairs[key] = value - end = _w(s, end).end() - nextchar = s[end:end + 1] - end += 1 - if nextchar == '}': - break - if nextchar != ',': - raise ValueError(errmsg("Expecting , delimiter", s, end - 1)) - end = _w(s, end).end() - nextchar = s[end:end + 1] - end += 1 - if nextchar != '"': - raise ValueError(errmsg("Expecting property name", s, end - 1)) - object_hook = getattr(context, 'object_hook', None) - if object_hook is not None: - pairs = object_hook(pairs) - return pairs, end -pattern(r'{')(JSONObject) - - -def JSONArray(match, context, _w=WHITESPACE.match): - values = [] - s = match.string - end = _w(s, match.end()).end() - # Look-ahead for trivial empty array - nextchar = s[end:end + 1] - if nextchar == ']': - return values, end + 1 - iterscan = JSONScanner.iterscan - while True: - try: - value, end = iterscan(s, idx=end, context=context).next() - except StopIteration: - 
raise ValueError(errmsg("Expecting object", s, end)) - values.append(value) - end = _w(s, end).end() - nextchar = s[end:end + 1] - end += 1 - if nextchar == ']': - break - if nextchar != ',': - raise ValueError(errmsg("Expecting , delimiter", s, end)) - end = _w(s, end).end() - return values, end -pattern(r'\[')(JSONArray) - - -ANYTHING = [ - JSONObject, - JSONArray, - JSONString, - JSONConstant, - JSONNumber, -] - -JSONScanner = Scanner(ANYTHING) - - -class JSONDecoder(object): - """Simple JSON decoder - - Performs the following translations in decoding by default: - - +---------------+-------------------+ - | JSON | Python | - +===============+===================+ - | object | dict | - +---------------+-------------------+ - | array | list | - +---------------+-------------------+ - | string | unicode | - +---------------+-------------------+ - | number (int) | int, long | - +---------------+-------------------+ - | number (real) | float | - +---------------+-------------------+ - | true | True | - +---------------+-------------------+ - | false | False | - +---------------+-------------------+ - | null | None | - +---------------+-------------------+ - - It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as - their corresponding ``float`` values, which is outside the JSON spec. - """ - - _scanner = Scanner(ANYTHING) - __all__ = ['__init__', 'decode', 'raw_decode'] - - def __init__(self, encoding=None, object_hook=None, parse_float=None, - parse_int=None, parse_constant=None, strict=True): - """``encoding`` determines the encoding used to interpret any ``str`` - objects decoded by this instance (utf-8 by default). It has no - effect when decoding ``unicode`` objects. - - Note that currently only encodings that are a superset of ASCII work, - strings of other encodings should be passed in as ``unicode``. - - ``object_hook``, if specified, will be called with the result of - every JSON object decoded and its return value will be used in - place of the given ``dict``. This can be used to provide custom - deserializations (e.g. to support JSON-RPC class hinting). - - ``parse_float``, if specified, will be called with the string - of every JSON float to be decoded. By default this is equivalent to - float(num_str). This can be used to use another datatype or parser - for JSON floats (e.g. decimal.Decimal). - - ``parse_int``, if specified, will be called with the string - of every JSON int to be decoded. By default this is equivalent to - int(num_str). This can be used to use another datatype or parser - for JSON integers (e.g. float). - - ``parse_constant``, if specified, will be called with one of the - following strings: -Infinity, Infinity, NaN, null, true, false. - This can be used to raise an exception if invalid JSON numbers - are encountered. 
- - """ - self.encoding = encoding - self.object_hook = object_hook - self.parse_float = parse_float - self.parse_int = parse_int - self.parse_constant = parse_constant - self.strict = strict - - def decode(self, s, _w=WHITESPACE.match): - """ - Return the Python representation of ``s`` (a ``str`` or ``unicode`` - instance containing a JSON document) - - """ - obj, end = self.raw_decode(s, idx=_w(s, 0).end()) - end = _w(s, end).end() - if end != len(s): - raise ValueError(errmsg("Extra data", s, end, len(s))) - return obj - - def raw_decode(self, s, **kw): - """Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning - with a JSON document) and return a 2-tuple of the Python - representation and the index in ``s`` where the document ended. - - This can be used to decode a JSON document from a string that may - have extraneous data at the end. - - """ - kw.setdefault('context', self) - try: - obj, end = self._scanner.iterscan(s, **kw).next() - except StopIteration: - raise ValueError("No JSON object could be decoded") - return obj, end diff --git a/icarus-miner/data/usr/lib/python2.6/json/decoder.pyc b/icarus-miner/data/usr/lib/python2.6/json/decoder.pyc deleted file mode 100644 index 3578817..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/json/decoder.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/json/encoder.py b/icarus-miner/data/usr/lib/python2.6/json/encoder.py deleted file mode 100644 index b1dd570..0000000 --- a/icarus-miner/data/usr/lib/python2.6/json/encoder.py +++ /dev/null @@ -1,384 +0,0 @@ -"""Implementation of JSONEncoder -""" - -import re -import math - -try: - from _json import encode_basestring_ascii as c_encode_basestring_ascii -except ImportError: - c_encode_basestring_ascii = None - -__all__ = ['JSONEncoder'] - -ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') -ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') -HAS_UTF8 = re.compile(r'[\x80-\xff]') -ESCAPE_DCT = { - '\\': '\\\\', - '"': '\\"', - '\b': '\\b', - '\f': '\\f', - '\n': '\\n', - '\r': '\\r', - '\t': '\\t', -} -for i in range(0x20): - ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i)) - -FLOAT_REPR = repr - -def floatstr(o, allow_nan=True): - # Check for specials. Note that this type of test is processor- and/or - # platform-specific, so do tests which don't depend on the internals. 
- - if math.isnan(o): - text = 'NaN' - elif math.isinf(o): - if math.copysign(1., o) == 1.: - text = 'Infinity' - else: - text = '-Infinity' - else: - return FLOAT_REPR(o) - - if not allow_nan: - msg = "Out of range float values are not JSON compliant: " + repr(o) - raise ValueError(msg) - - return text - - -def encode_basestring(s): - """Return a JSON representation of a Python string - - """ - def replace(match): - return ESCAPE_DCT[match.group(0)] - return '"' + ESCAPE.sub(replace, s) + '"' - - -def py_encode_basestring_ascii(s): - if isinstance(s, str) and HAS_UTF8.search(s) is not None: - s = s.decode('utf-8') - def replace(match): - s = match.group(0) - try: - return ESCAPE_DCT[s] - except KeyError: - n = ord(s) - if n < 0x10000: - return '\\u{0:04x}'.format(n) - else: - # surrogate pair - n -= 0x10000 - s1 = 0xd800 | ((n >> 10) & 0x3ff) - s2 = 0xdc00 | (n & 0x3ff) - return '\\u{0:04x}\\u{1:04x}'.format(s1, s2) - return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"' - - -if c_encode_basestring_ascii is not None: - encode_basestring_ascii = c_encode_basestring_ascii -else: - encode_basestring_ascii = py_encode_basestring_ascii - - -class JSONEncoder(object): - """Extensible JSON encoder for Python data structures. - - Supports the following objects and types by default: - - +-------------------+---------------+ - | Python | JSON | - +===================+===============+ - | dict | object | - +-------------------+---------------+ - | list, tuple | array | - +-------------------+---------------+ - | str, unicode | string | - +-------------------+---------------+ - | int, long, float | number | - +-------------------+---------------+ - | True | true | - +-------------------+---------------+ - | False | false | - +-------------------+---------------+ - | None | null | - +-------------------+---------------+ - - To extend this to recognize other objects, subclass and implement a - ``.default()`` method with another method that returns a serializable - object for ``o`` if possible, otherwise it should call the superclass - implementation (to raise ``TypeError``). - - """ - __all__ = ['__init__', 'default', 'encode', 'iterencode'] - item_separator = ', ' - key_separator = ': ' - def __init__(self, skipkeys=False, ensure_ascii=True, - check_circular=True, allow_nan=True, sort_keys=False, - indent=None, separators=None, encoding='utf-8', default=None): - """Constructor for JSONEncoder, with sensible defaults. - - If skipkeys is False, then it is a TypeError to attempt - encoding of keys that are not str, int, long, float or None. If - skipkeys is True, such items are simply skipped. - - If ensure_ascii is True, the output is guaranteed to be str - objects with all incoming unicode characters escaped. If - ensure_ascii is false, the output will be unicode object. - - If check_circular is True, then lists, dicts, and custom encoded - objects will be checked for circular references during encoding to - prevent an infinite recursion (which would cause an OverflowError). - Otherwise, no such check takes place. - - If allow_nan is True, then NaN, Infinity, and -Infinity will be - encoded as such. This behavior is not JSON specification compliant, - but is consistent with most JavaScript based encoders and decoders. - Otherwise, it will be a ValueError to encode such floats. - - If sort_keys is True, then the output of dictionaries will be - sorted by key; this is useful for regression tests to ensure - that JSON serializations can be compared on a day-to-day basis. 
- - If indent is a non-negative integer, then JSON array - elements and object members will be pretty-printed with that - indent level. An indent level of 0 will only insert newlines. - None is the most compact representation. - - If specified, separators should be a (item_separator, key_separator) - tuple. The default is (', ', ': '). To get the most compact JSON - representation you should specify (',', ':') to eliminate whitespace. - - If specified, default is a function that gets called for objects - that can't otherwise be serialized. It should return a JSON encodable - version of the object or raise a ``TypeError``. - - If encoding is not None, then all input strings will be - transformed into unicode using that encoding prior to JSON-encoding. - The default is UTF-8. - - """ - self.skipkeys = skipkeys - self.ensure_ascii = ensure_ascii - self.check_circular = check_circular - self.allow_nan = allow_nan - self.sort_keys = sort_keys - self.indent = indent - self.current_indent_level = 0 - if separators is not None: - self.item_separator, self.key_separator = separators - if default is not None: - self.default = default - self.encoding = encoding - - def _newline_indent(self): - return '\n' + (' ' * (self.indent * self.current_indent_level)) - - def _iterencode_list(self, lst, markers=None): - if not lst: - yield '[]' - return - if markers is not None: - markerid = id(lst) - if markerid in markers: - raise ValueError("Circular reference detected") - markers[markerid] = lst - yield '[' - if self.indent is not None: - self.current_indent_level += 1 - newline_indent = self._newline_indent() - separator = self.item_separator + newline_indent - yield newline_indent - else: - newline_indent = None - separator = self.item_separator - first = True - for value in lst: - if first: - first = False - else: - yield separator - for chunk in self._iterencode(value, markers): - yield chunk - if newline_indent is not None: - self.current_indent_level -= 1 - yield self._newline_indent() - yield ']' - if markers is not None: - del markers[markerid] - - def _iterencode_dict(self, dct, markers=None): - if not dct: - yield '{}' - return - if markers is not None: - markerid = id(dct) - if markerid in markers: - raise ValueError("Circular reference detected") - markers[markerid] = dct - yield '{' - key_separator = self.key_separator - if self.indent is not None: - self.current_indent_level += 1 - newline_indent = self._newline_indent() - item_separator = self.item_separator + newline_indent - yield newline_indent - else: - newline_indent = None - item_separator = self.item_separator - first = True - if self.ensure_ascii: - encoder = encode_basestring_ascii - else: - encoder = encode_basestring - allow_nan = self.allow_nan - if self.sort_keys: - keys = dct.keys() - keys.sort() - items = [(k, dct[k]) for k in keys] - else: - items = dct.iteritems() - _encoding = self.encoding - _do_decode = (_encoding is not None - and not (_encoding == 'utf-8')) - for key, value in items: - if isinstance(key, str): - if _do_decode: - key = key.decode(_encoding) - elif isinstance(key, basestring): - pass - # JavaScript is weakly typed for these, so it makes sense to - # also allow them. Many encoders seem to do something like this. 
- elif isinstance(key, float): - key = floatstr(key, allow_nan) - elif isinstance(key, (int, long)): - key = str(key) - elif key is True: - key = 'true' - elif key is False: - key = 'false' - elif key is None: - key = 'null' - elif self.skipkeys: - continue - else: - raise TypeError("key {0!r} is not a string".format(key)) - if first: - first = False - else: - yield item_separator - yield encoder(key) - yield key_separator - for chunk in self._iterencode(value, markers): - yield chunk - if newline_indent is not None: - self.current_indent_level -= 1 - yield self._newline_indent() - yield '}' - if markers is not None: - del markers[markerid] - - def _iterencode(self, o, markers=None): - if isinstance(o, basestring): - if self.ensure_ascii: - encoder = encode_basestring_ascii - else: - encoder = encode_basestring - _encoding = self.encoding - if (_encoding is not None and isinstance(o, str) - and not (_encoding == 'utf-8')): - o = o.decode(_encoding) - yield encoder(o) - elif o is None: - yield 'null' - elif o is True: - yield 'true' - elif o is False: - yield 'false' - elif isinstance(o, (int, long)): - yield str(o) - elif isinstance(o, float): - yield floatstr(o, self.allow_nan) - elif isinstance(o, (list, tuple)): - for chunk in self._iterencode_list(o, markers): - yield chunk - elif isinstance(o, dict): - for chunk in self._iterencode_dict(o, markers): - yield chunk - else: - if markers is not None: - markerid = id(o) - if markerid in markers: - raise ValueError("Circular reference detected") - markers[markerid] = o - for chunk in self._iterencode_default(o, markers): - yield chunk - if markers is not None: - del markers[markerid] - - def _iterencode_default(self, o, markers=None): - newobj = self.default(o) - return self._iterencode(newobj, markers) - - def default(self, o): - """Implement this method in a subclass such that it returns a serializable - object for ``o``, or calls the base implementation (to raise a - ``TypeError``). - - For example, to support arbitrary iterators, you could implement - default like this:: - - def default(self, o): - try: - iterable = iter(o) - except TypeError: - pass - else: - return list(iterable) - return JSONEncoder.default(self, o) - - """ - raise TypeError(repr(o) + " is not JSON serializable") - - def encode(self, o): - """Return a JSON string representation of a Python data structure. - - >>> JSONEncoder().encode({"foo": ["bar", "baz"]}) - '{"foo": ["bar", "baz"]}' - - """ - # This is for extremely simple cases and benchmarks. - if isinstance(o, basestring): - if isinstance(o, str): - _encoding = self.encoding - if (_encoding is not None - and not (_encoding == 'utf-8')): - o = o.decode(_encoding) - if self.ensure_ascii: - return encode_basestring_ascii(o) - else: - return encode_basestring(o) - # This doesn't pass the iterator directly to ''.join() because the - # exceptions aren't as detailed. The list call should be roughly - # equivalent to the PySequence_Fast that ''.join() would do. - chunks = list(self.iterencode(o)) - return ''.join(chunks) - - def iterencode(self, o): - """Encode the given object and yield each string representation as - available. 
- - For example:: - - for chunk in JSONEncoder().iterencode(bigobject): - mysocket.write(chunk) - - """ - if self.check_circular: - markers = {} - else: - markers = None - return self._iterencode(o, markers) diff --git a/icarus-miner/data/usr/lib/python2.6/json/encoder.pyc b/icarus-miner/data/usr/lib/python2.6/json/encoder.pyc deleted file mode 100644 index 9986dac..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/json/encoder.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/json/scanner.py b/icarus-miner/data/usr/lib/python2.6/json/scanner.py deleted file mode 100644 index 4b065ab..0000000 --- a/icarus-miner/data/usr/lib/python2.6/json/scanner.py +++ /dev/null @@ -1,69 +0,0 @@ -"""Iterator based sre token scanner - -""" - -import re -import sre_parse -import sre_compile -import sre_constants - -from re import VERBOSE, MULTILINE, DOTALL -from sre_constants import BRANCH, SUBPATTERN - -__all__ = ['Scanner', 'pattern'] - -FLAGS = (VERBOSE | MULTILINE | DOTALL) - -class Scanner(object): - def __init__(self, lexicon, flags=FLAGS): - self.actions = [None] - # Combine phrases into a compound pattern - s = sre_parse.Pattern() - s.flags = flags - p = [] - for idx, token in enumerate(lexicon): - phrase = token.pattern - try: - subpattern = sre_parse.SubPattern(s, - [(SUBPATTERN, (idx + 1, sre_parse.parse(phrase, flags)))]) - except sre_constants.error: - raise - p.append(subpattern) - self.actions.append(token) - - s.groups = len(p) + 1 # NOTE(guido): Added to make SRE validation work - p = sre_parse.SubPattern(s, [(BRANCH, (None, p))]) - self.scanner = sre_compile.compile(p) - - def iterscan(self, string, idx=0, context=None): - """Yield match, end_idx for each match - - """ - match = self.scanner.scanner(string, idx).match - actions = self.actions - lastend = idx - end = len(string) - while True: - m = match() - if m is None: - break - matchbegin, matchend = m.span() - if lastend == matchend: - break - action = actions[m.lastindex] - if action is not None: - rval, next_pos = action(m, context) - if next_pos is not None and next_pos != matchend: - # "fast forward" the scanner - matchend = next_pos - match = self.scanner.scanner(string, matchend).match - yield rval, matchend - lastend = matchend - - -def pattern(pattern, flags=FLAGS): - def decorator(fn): - fn.pattern = pattern - fn.regex = re.compile(pattern, flags) - return fn - return decorator diff --git a/icarus-miner/data/usr/lib/python2.6/json/scanner.pyc b/icarus-miner/data/usr/lib/python2.6/json/scanner.pyc deleted file mode 100644 index 3030cdc..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/json/scanner.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/json/tool.py b/icarus-miner/data/usr/lib/python2.6/json/tool.py deleted file mode 100644 index daedbb5..0000000 --- a/icarus-miner/data/usr/lib/python2.6/json/tool.py +++ /dev/null @@ -1,37 +0,0 @@ -r"""Command-line tool to validate and pretty-print JSON - -Usage:: - - $ echo '{"json":"obj"}' | python -mjson.tool - { - "json": "obj" - } - $ echo '{ 1.2:3.4}' | python -mjson.tool - Expecting property name: line 1 column 2 (char 2) - -""" -import sys -import json - -def main(): - if len(sys.argv) == 1: - infile = sys.stdin - outfile = sys.stdout - elif len(sys.argv) == 2: - infile = open(sys.argv[1], 'rb') - outfile = sys.stdout - elif len(sys.argv) == 3: - infile = open(sys.argv[1], 'rb') - outfile = open(sys.argv[2], 'wb') - else: - raise SystemExit("{0} [infile [outfile]]".format(sys.argv[0])) - try: - obj = 
json.load(infile) - except ValueError, e: - raise SystemExit(e) - json.dump(obj, outfile, sort_keys=True, indent=4) - outfile.write('\n') - - -if __name__ == '__main__': - main() diff --git a/icarus-miner/data/usr/lib/python2.6/jsonrpc/__init__.py b/icarus-miner/data/usr/lib/python2.6/jsonrpc/__init__.py deleted file mode 100644 index 71779a1..0000000 --- a/icarus-miner/data/usr/lib/python2.6/jsonrpc/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ - -""" - Copyright (c) 2007 Jan-Klaas Kollhof - - This file is part of jsonrpc. - - jsonrpc is free software; you can redistribute it and/or modify - it under the terms of the GNU Lesser General Public License as published by - the Free Software Foundation; either version 2.1 of the License, or - (at your option) any later version. - - This software is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public License - along with this software; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -""" - -from jsonrpc.json import loads, dumps, JSONEncodeException, JSONDecodeException -from jsonrpc.proxy import ServiceProxy, JSONRPCException -from jsonrpc.serviceHandler import ServiceMethod, ServiceHandler, ServiceMethodNotFound, ServiceException -from jsonrpc.cgiwrapper import handleCGI -from jsonrpc.modpywrapper import handler \ No newline at end of file diff --git a/icarus-miner/data/usr/lib/python2.6/jsonrpc/__init__.pyc b/icarus-miner/data/usr/lib/python2.6/jsonrpc/__init__.pyc deleted file mode 100644 index 31fd6a2..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/jsonrpc/__init__.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/jsonrpc/_tests/__init__.py b/icarus-miner/data/usr/lib/python2.6/jsonrpc/_tests/__init__.py deleted file mode 100644 index ac6a920..0000000 --- a/icarus-miner/data/usr/lib/python2.6/jsonrpc/_tests/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ - -""" - Copyright (c) 2007 Jan-Klaas Kollhof - - This file is part of jsonrpc. - - jsonrpc is free software; you can redistribute it and/or modify - it under the terms of the GNU Lesser General Public License as published by - the Free Software Foundation; either version 2.1 of the License, or - (at your option) any later version. - - This software is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public License - along with this software; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -""" diff --git a/icarus-miner/data/usr/lib/python2.6/jsonrpc/_tests/test_cgiwrapper.py b/icarus-miner/data/usr/lib/python2.6/jsonrpc/_tests/test_cgiwrapper.py deleted file mode 100644 index d35cd9a..0000000 --- a/icarus-miner/data/usr/lib/python2.6/jsonrpc/_tests/test_cgiwrapper.py +++ /dev/null @@ -1,55 +0,0 @@ - -""" - Copyright (c) 2007 Jan-Klaas Kollhof - - This file is part of jsonrpc. 
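Note: every name re-exported in the __init__.py above funnels through ServiceProxy on the client side. A minimal sketch of a call; the URL, credentials, and method name are illustrative, not taken from the original tree::

    from jsonrpc import ServiceProxy, JSONRPCException

    # urllib.urlopen handles the basic-auth URL form
    bitcoind = ServiceProxy("http://user:pass@127.0.0.1:8332")
    try:
        # POSTs {"method": "getwork", "params": [], "id": "jsonrpc"}
        work = bitcoind.getwork()
    except JSONRPCException, e:
        print "RPC error:", e.error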
- - jsonrpc is free software; you can redistribute it and/or modify - it under the terms of the GNU Lesser General Public License as published by - the Free Software Foundation; either version 2.1 of the License, or - (at your option) any later version. - - This software is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public License - along with this software; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -""" -import unittest -import jsonrpc -from types import * - -class Service(object): - @jsonrpc.ServiceMethod - def echo(self, arg): - return arg - - -class TestCGIWrapper(unittest.TestCase): - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_runCGIHandler(self): - from StringIO import StringIO - - json=u'{"method":"echo","params":["foobar"], "id":""}' - fin=StringIO(json) - fout=StringIO() - - env = {"CONTENT_LENGTH":len(json)} - - jsonrpc.handleCGI(service=Service(), fin=fin, fout=fout, env=env) - - data = StringIO(fout.getvalue()) - data.readline() - data.readline() - data = data.read() - self.assertEquals(jsonrpc.loads(data), {"result":"foobar", "error":None, "id":""}) - diff --git a/icarus-miner/data/usr/lib/python2.6/jsonrpc/_tests/test_json.py b/icarus-miner/data/usr/lib/python2.6/jsonrpc/_tests/test_json.py deleted file mode 100644 index a245abd..0000000 --- a/icarus-miner/data/usr/lib/python2.6/jsonrpc/_tests/test_json.py +++ /dev/null @@ -1,184 +0,0 @@ - -""" - Copyright (c) 2007 Jan-Klaas Kollhof - - This file is part of jsonrpc. - - jsonrpc is free software; you can redistribute it and/or modify - it under the terms of the GNU Lesser General Public License as published by - the Free Software Foundation; either version 2.1 of the License, or - (at your option) any later version. - - This software is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Lesser General Public License for more details. 
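Note: the CGI test above pins down the transport; the request/response envelope it asserts on is the same across the whole package. A sketch of one round trip through a handler directly, without the CGI layer (service and values are illustrative)::

    import jsonrpc

    class Service(object):
        @jsonrpc.ServiceMethod
        def echo(self, arg):
            return arg

    handler = jsonrpc.ServiceHandler(Service())
    # request envelope in, result envelope out
    resp = handler.handleRequest('{"method": "echo", "params": ["ping"], "id": 1}')
    assert jsonrpc.loads(resp) == {u'result': u'ping', u'error': None, u'id': 1}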
- - You should have received a copy of the GNU Lesser General Public License - along with this software; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -""" - -import unittest -import jsonrpc -from types import * - - - -class TestDumps(unittest.TestCase): - def setUp(self): - pass - - def tearDown(self): - pass - - - def assertJSON(self, json, expectedJSON): - self.assert_(type(json) is UnicodeType) - self.assertEqual(json, expectedJSON) - - def test_Number(self): - json = jsonrpc.dumps(1) - self.assertJSON(json, u'1') - - json = jsonrpc.dumps(0xffffffffffffffffffffffff) - self.assertJSON(json, u'79228162514264337593543950335') - - def test_None(self): - json = jsonrpc.dumps(None) - self.assertJSON(json, u'null') - - def test_Boolean(self): - json = jsonrpc.dumps(False) - self.assertJSON(json, u'false') - json = jsonrpc.dumps(True) - self.assertJSON(json, u'true') - - def test_Float(self): - json = jsonrpc.dumps(1.2345) - self.assertJSON(json, u'1.2345') - - json =jsonrpc.dumps(1.2345e67) - self.assertJSON(json, u'1.2345e+67') - - json =jsonrpc.dumps(1.2345e-67) - self.assertJSON(json, u'1.2345e-67') - - def test_String(self): - json = jsonrpc.dumps('foobar') - self.assertJSON(json, u'"foobar"') - - json = jsonrpc.dumps('foobar') - self.assertJSON(json, u'"foobar"') - - def test_StringEscapedChars(self): - json = jsonrpc.dumps('\n \f \t \b \r \\ " /') - self.assertJSON(json, u'"\\n \\f \\t \\b \\r \\\\ \\" \\/"') - - def test_StringEscapedUnicodeChars(self): - json = jsonrpc.dumps(u'\0 \x19 \x20\u0130') - self.assertJSON(json, u'"\\u0000 \\u0019 \u0130"') - - def test_Array(self): - json = jsonrpc.dumps([1, 2.3e45, 'foobar']) - self.assertJSON(json, u'[1,2.3e+45,"foobar"]') - - def test_Dictionary(self): - json = jsonrpc.dumps({'foobar':'spam', 'a':[1,2,3]}) - self.assertJSON(json, u'{"a":[1,2,3],"foobar":"spam"}') - - def test_FailOther(self): - self.failUnlessRaises(jsonrpc.JSONEncodeException, lambda:jsonrpc.dumps(self)) - - - - -class TestLoads(unittest.TestCase): - def setUp(self): - pass - - def tearDown(self): - pass - - - def test_String(self): - - json = jsonrpc.dumps("foobar") - obj = jsonrpc.loads(json) - self.assertEquals(obj, u"foobar") - - def test_StringEscapedChars(self): - json = '"\\n \\t \\r \\b \\f \\\\ \\/ /"' - obj = jsonrpc.loads(json) - self.assertEquals(obj, u'\n \t \r \b \f \\ / /') - - def test_StringEscapedUnicodeChars(self): - json = jsonrpc.dumps(u'\u0000 \u0019') - obj = jsonrpc.loads(json) - self.assertEquals(obj, u'\0 \x19') - - def test_Array(self): - json = jsonrpc.dumps(['1', ['2','3']]) - obj = jsonrpc.loads(json) - self.assertEquals(obj, ['1', ['2','3']]) - - def test_Dictionary(self): - json = jsonrpc.dumps({'foobar':'spam', 'nested':{'a':'b'}}) - obj = jsonrpc.loads(json) - self.assertEquals(obj, {'foobar':'spam', 'nested':{'a':'b'}}) - - - def test_Int(self): - json = jsonrpc.dumps(1234) - obj = jsonrpc.loads(json) - self.assertEquals(obj, 1234) - - - def test_NegativeInt(self): - json = jsonrpc.dumps(-1234) - obj = jsonrpc.loads(json) - self.assertEquals(obj, -1234) - - def test_NumberAtEndOfArray(self): - json = jsonrpc.dumps([-1234]) - obj = jsonrpc.loads(json) - self.assertEquals(obj, [-1234]) - - def test_StrAtEndOfArray(self): - json = jsonrpc.dumps(['foobar']) - obj = jsonrpc.loads(json) - self.assertEquals(obj, ['foobar']) - - def test_Float(self): - json = jsonrpc.dumps(1234.567) - obj = jsonrpc.loads(json) - self.assertEquals(obj, 1234.567) - - def test_Exponential(self): - json 
= jsonrpc.dumps(1234.567e89) - obj = jsonrpc.loads(json) - self.assertEquals(obj, 1234.567e89) - - def test_True(self): - json = jsonrpc.dumps(True) - obj = jsonrpc.loads(json) - self.assertEquals(obj, True) - - def test_False(self): - json = jsonrpc.dumps(False) - obj = jsonrpc.loads(json) - self.assertEquals(obj, False) - - def test_None(self): - json = jsonrpc.dumps(None) - obj = jsonrpc.loads(json) - self.assertEquals(obj, None) - - def test_NestedDictAllTypes(self): - json = jsonrpc.dumps({'s':'foobar', 'int':1234, 'float':1234.567, 'exp':1234.56e78, - 'negInt':-1234, 'None':None,'True':True, 'False':False, - 'list':[1,2,4,{}], 'dict':{'a':'b'}}) - obj = jsonrpc.loads(json) - self.assertEquals(obj, {'s':'foobar', 'int':1234, 'float':1234.567, 'exp':1234.56e78, - 'negInt':-1234, 'None':None,'True':True, 'False':False, - 'list':[1,2,4,{}], 'dict':{'a':'b'}}) diff --git a/icarus-miner/data/usr/lib/python2.6/jsonrpc/_tests/test_modpywrapper.py b/icarus-miner/data/usr/lib/python2.6/jsonrpc/_tests/test_modpywrapper.py deleted file mode 100644 index 92575e2..0000000 --- a/icarus-miner/data/usr/lib/python2.6/jsonrpc/_tests/test_modpywrapper.py +++ /dev/null @@ -1,98 +0,0 @@ - -""" - Copyright (c) 2007 Jan-Klaas Kollhof - - This file is part of jsonrpc. - - jsonrpc is free software; you can redistribute it and/or modify - it under the terms of the GNU Lesser General Public License as published by - the Free Software Foundation; either version 2.1 of the License, or - (at your option) any later version. - - This software is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public License - along with this software; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -""" -import unittest -import jsonrpc -from types import * - -class Service(object): - @jsonrpc.ServiceMethod - def echo(self, arg): - return arg - - -class ApacheRequestMockup(object): - - def __init__(self, filename, fin, fout): - self.fin=fin - self.fout = fout - self.filename = filename - - def write(self,data): - self.fout.write(data) - - def flush(self): - pass - - def read(self): - return self.fin.read() - -class ModPyMockup(object): - def __init__(self): - self.apache=ApacheModuleMockup() - -class ApacheModuleMockup(object): - def __getattr__(self, name): - return name - - def import_module(self, moduleName, log=1): - return Service() - - - -class TestModPyWrapper(unittest.TestCase): - - def setUp(self): - import sys - sys.modules['mod_python'] =ModPyMockup() - - def tearDown(self): - pass - - def test_runHandler(self): - from StringIO import StringIO - - json=u'{"method":"echo","params":["foobar"], "id":""}' - fin=StringIO(json) - fout=StringIO() - req = ApacheRequestMockup(__file__ , fin, fout) - - jsonrpc.handler(req) - - data = fout.getvalue() - - self.assertEquals(jsonrpc.loads(data), {"result":"foobar", "error":None, "id":""}) - - def test_ServiceImplementationNotFound(self): - from StringIO import StringIO - - json=u'{"method":"echo","params":["foobar"], "id":""}' - fin=StringIO(json) - fout=StringIO() - req = ApacheRequestMockup("foobar" , fin, fout) - - rslt = jsonrpc.handler(req) - self.assertEquals(rslt, "OK") - data = fout.getvalue() - - self.assertEquals(jsonrpc.loads(data), {u'id': '', u'result': None, u'error': 
{u'message': '', u'name': u'ServiceImplementaionNotFound'}} ) - - - diff --git a/icarus-miner/data/usr/lib/python2.6/jsonrpc/_tests/test_proxy.py b/icarus-miner/data/usr/lib/python2.6/jsonrpc/_tests/test_proxy.py deleted file mode 100644 index 1591fa0..0000000 --- a/icarus-miner/data/usr/lib/python2.6/jsonrpc/_tests/test_proxy.py +++ /dev/null @@ -1,61 +0,0 @@ - -""" - Copyright (c) 2007 Jan-Klaas Kollhof - - This file is part of jsonrpc. - - jsonrpc is free software; you can redistribute it and/or modify - it under the terms of the GNU Lesser General Public License as published by - the Free Software Foundation; either version 2.1 of the License, or - (at your option) any later version. - - This software is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public License - along with this software; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -""" - -import unittest -import jsonrpc - -import urllib - -from StringIO import StringIO - -class TestProxy(unittest.TestCase): - - def urlopen(self, url, data): - self.postdata = data - return StringIO(self.respdata) - - def setUp(self): - self.postdata="" - self.urllib_openurl = urllib.urlopen - urllib.urlopen = self.urlopen - - def tearDown(self): - urllib.urlopen = self.urllib_openurl - - def test_ProvidesProxyMethod(self): - s = jsonrpc.ServiceProxy("http://localhost/") - self.assert_(callable(s.echo)) - - def test_MethodCallCallsService(self): - - s = jsonrpc.ServiceProxy("http://localhost/") - - self.respdata='{"result":"foobar","error":null,"id":""}' - echo = s.echo("foobar") - self.assertEquals(self.postdata, jsonrpc.dumps({"method":"echo", 'params':['foobar'], 'id':'jsonrpc'})) - self.assertEquals(echo, 'foobar') - - self.respdata='{"result":null,"error":"MethodNotFound","id":""}' - try: - s.echo("foobar") - except jsonrpc.JSONRPCException,e: - self.assertEquals(e.error, "MethodNotFound") - \ No newline at end of file diff --git a/icarus-miner/data/usr/lib/python2.6/jsonrpc/_tests/test_serviceHandler.py b/icarus-miner/data/usr/lib/python2.6/jsonrpc/_tests/test_serviceHandler.py deleted file mode 100644 index 9c12999..0000000 --- a/icarus-miner/data/usr/lib/python2.6/jsonrpc/_tests/test_serviceHandler.py +++ /dev/null @@ -1,153 +0,0 @@ - -""" - Copyright (c) 2007 Jan-Klaas Kollhof - - This file is part of jsonrpc. - - jsonrpc is free software; you can redistribute it and/or modify - it under the terms of the GNU Lesser General Public License as published by - the Free Software Foundation; either version 2.1 of the License, or - (at your option) any later version. - - This software is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Lesser General Public License for more details. 
- - You should have received a copy of the GNU Lesser General Public License - along with this software; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -""" - - - - -import unittest -import jsonrpc -from types import * - - -class Service(object): - @jsonrpc.ServiceMethod - def echo(self, arg): - return arg - - def not_a_serviceMethod(self): - pass - - @jsonrpc.ServiceMethod - def raiseError(self): - raise Exception("foobar") - -class Handler(jsonrpc.ServiceHandler): - def __init__(self, service): - self.service=service - - def translateRequest(self, data): - self._requestTranslated=True - return jsonrpc.ServiceHandler.translateRequest(self, data) - - def findServiceEndpoint(self, name): - self._foundServiceEndpoint=True - return jsonrpc.ServiceHandler.findServiceEndpoint(self, name) - - def invokeServiceEndpoint(self, meth, params): - self._invokedEndpoint=True - return jsonrpc.ServiceHandler.invokeServiceEndpoint(self, meth, params) - - def translateResult(self, result, error, id_): - self._resultTranslated=True - return jsonrpc.ServiceHandler.translateResult(self, result, error, id_) - - - -class TestServiceHandler(unittest.TestCase): - - def setUp(self): - self.service = Service() - - def tearDown(self): - pass - - def test_RequestProcessing(self): - handler = Handler(self.service) - json=jsonrpc.dumps({"method":"echo", 'params':['foobar'], 'id':''}) - - result = handler.handleRequest(json) - self.assert_(handler._requestTranslated) - self.assert_(handler._foundServiceEndpoint) - self.assert_(handler._invokedEndpoint) - self.assert_(handler._resultTranslated) - - def test_translateRequest(self): - handler = Handler(self.service) - json=jsonrpc.dumps({"method":"echo", 'params':['foobar'], 'id':''}) - req = handler.translateRequest(json) - self.assertEquals(req['method'], "echo") - self.assertEquals(req['params'],['foobar']) - self.assertEquals(req['id'],'') - - def test_findServiceEndpoint(self): - handler = Handler(self.service) - self.assertRaises(jsonrpc.ServiceMethodNotFound, handler.findServiceEndpoint, "notfound") - self.assertRaises(jsonrpc.ServiceMethodNotFound, handler.findServiceEndpoint, "not_a_serviceMethod") - meth = handler.findServiceEndpoint("echo") - self.assertEquals(self.service.echo, meth) - - def test_invokeEndpoint(self): - handler = Handler(self.service) - meth = handler.findServiceEndpoint("echo") - rslt = handler.invokeServiceEndpoint(meth, ['spam']) - self.assertEquals(rslt, 'spam') - - def test_translateResults(self): - handler=Handler(self.service) - data=handler.translateResult("foobar", None, "spam") - self.assertEquals(jsonrpc.loads(data), {"result":"foobar","id":"spam","error":None}) - - def test_translateError(self): - handler=Handler(self.service) - exc = Exception() - data=handler.translateResult(None, exc, "id") - self.assertEquals(jsonrpc.loads(data), {"result":None,"id":"id","error":{"name":"Exception", "message":""}}) - - def test_translateUnencodableResults(self): - handler=Handler(self.service) - data=handler.translateResult(self, None, "spam") - self.assertEquals(jsonrpc.loads(data), {"result":None,"id":"spam","error":{"name":"JSONEncodeException", "message":"Result Object Not Serializable"}}) - - def test_handleRequestEcho(self): - handler=Handler(self.service) - json=jsonrpc.dumps({"method":"echo", 'params':['foobar'], 'id':''}) - result = handler.handleRequest(json) - self.assertEquals(jsonrpc.loads(result), jsonrpc.loads('{"result":"foobar", "error":null, "id":""}')) - - def 
test_handleRequestMethodNotFound(self): - handler=Handler(self.service) - json=jsonrpc.dumps({"method":"not_found", 'params':['foobar'], 'id':''}) - result = handler.handleRequest(json) - self.assertEquals(jsonrpc.loads(result), {"result":None, "error":{"name":"ServiceMethodNotFound", "message":""}, "id":""}) - - def test_handleRequestMethodNotAllowed(self): - handler=Handler(self.service) - json=jsonrpc.dumps({"method":"not_a_ServiceMethod", 'params':['foobar'], 'id':''}) - result = handler.handleRequest(json) - self.assertEquals(jsonrpc.loads(result), {"result":None, "error":{"name":"ServiceMethodNotFound", "message":""}, "id":""}) - - def test_handleRequestMethodRaiseError(self): - handler=Handler(self.service) - json=jsonrpc.dumps({"method":"raiseError", 'params':[], 'id':''}) - result = handler.handleRequest(json) - self.assertEquals(jsonrpc.loads(result), {"result":None, "error":{"name":"Exception", "message":"foobar"}, "id":""}) - - def test_handleBadRequestData(self): - handler=Handler(self.service) - json = "This is not a JSON-RPC request" - result = handler.handleRequest(json) - self.assertEquals(jsonrpc.loads(result), {"result":None, "error":{"name":"ServiceRequestNotTranslatable", "message":json}, "id":""}) - - def test_handleBadRequestObject(self): - handler=Handler(self.service) - json = "{}" - result = handler.handleRequest(json) - self.assertEquals(jsonrpc.loads(result), {"result":None, "error":{"name":"BadServiceRequest", "message":json}, "id":""}) diff --git a/icarus-miner/data/usr/lib/python2.6/jsonrpc/cgiwrapper.py b/icarus-miner/data/usr/lib/python2.6/jsonrpc/cgiwrapper.py deleted file mode 100644 index a5bc2b0..0000000 --- a/icarus-miner/data/usr/lib/python2.6/jsonrpc/cgiwrapper.py +++ /dev/null @@ -1,45 +0,0 @@ -import sys, os -from jsonrpc import ServiceHandler - -class CGIServiceHandler(ServiceHandler): - def __init__(self, service): - if service == None: - import __main__ as service - - ServiceHandler.__init__(self, service) - - def handleRequest(self, fin=None, fout=None, env=None): - if fin==None: - fin = sys.stdin - if fout==None: - fout = sys.stdout - if env == None: - env = os.environ - - try: - contLen=int(env['CONTENT_LENGTH']) - data = fin.read(contLen) - except Exception, e: - data = "" - - resultData = ServiceHandler.handleRequest(self, data) - - response = "Content-Type: text/plain\n" - response += "Content-Length: %d\n\n" % len(resultData) - response += resultData - - #on windows all \n are converted to \r\n if stdout is a terminal and is not set to binary mode :( - #this will then cause an incorrect Content-length. - #I have only experienced this problem with apache on Win so far. 
- if sys.platform == "win32": - try: - import msvcrt - msvcrt.setmode(fout.fileno(), os.O_BINARY) - except: - pass - #put out the response - fout.write(response) - fout.flush() - -def handleCGI(service=None, fin=None, fout=None, env=None): - CGIServiceHandler(service).handleRequest(fin, fout, env) \ No newline at end of file diff --git a/icarus-miner/data/usr/lib/python2.6/jsonrpc/cgiwrapper.pyc b/icarus-miner/data/usr/lib/python2.6/jsonrpc/cgiwrapper.pyc deleted file mode 100644 index d100b8d..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/jsonrpc/cgiwrapper.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/jsonrpc/json.py b/icarus-miner/data/usr/lib/python2.6/jsonrpc/json.py deleted file mode 100644 index 4f945b2..0000000 --- a/icarus-miner/data/usr/lib/python2.6/jsonrpc/json.py +++ /dev/null @@ -1,230 +0,0 @@ - -""" - Copyright (c) 2007 Jan-Klaas Kollhof - - This file is part of jsonrpc. - - jsonrpc is free software; you can redistribute it and/or modify - it under the terms of the GNU Lesser General Public License as published by - the Free Software Foundation; either version 2.1 of the License, or - (at your option) any later version. - - This software is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public License - along with this software; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -""" - -from types import * -import re - -CharReplacements ={ - '\t': '\\t', - '\b': '\\b', - '\f': '\\f', - '\n': '\\n', - '\r': '\\r', - '\\': '\\\\', - '/': '\\/', - '"': '\\"'} - -EscapeCharToChar = { - 't': '\t', - 'b': '\b', - 'f': '\f', - 'n': '\n', - 'r': '\r', - '\\': '\\', - '/': '/', - '"' : '"'} - -StringEscapeRE= re.compile(r'[\x00-\x19\\"/\b\f\n\r\t]') -Digits = ['0', '1', '2','3','4','5','6','7','8','9'] - - -class JSONEncodeException(Exception): - def __init__(self, obj): - Exception.__init__(self) - self.obj = obj - - def __str__(self): - return "Object not encodeable: %s" % self.obj - - -class JSONDecodeException(Exception): - def __init__(self, message): - Exception.__init__(self) - self.message = message - - def __str__(self): - return self.message - - -def escapeChar(match): - c=match.group(0) - try: - replacement = CharReplacements[c] - return replacement - except KeyError: - d = ord(c) - if d < 32: - return '\\u%04x' % d - else: - return c - -def dumps(obj): - return unicode("".join([part for part in dumpParts (obj)])) - -def dumpParts (obj): - objType = type(obj) - if obj == None: - yield u'null' - elif objType is BooleanType: - if obj: - yield u'true' - else: - yield u'false' - elif objType is DictionaryType: - yield u'{' - isFirst=True - for (key, value) in obj.items(): - if isFirst: - isFirst=False - else: - yield u"," - yield u'"' + StringEscapeRE.sub(escapeChar, key) +u'":' - for part in dumpParts (value): - yield part - yield u'}' - elif objType in StringTypes: - yield u'"' + StringEscapeRE.sub(escapeChar, obj) +u'"' - - elif objType in [TupleType, ListType, GeneratorType]: - yield u'[' - isFirst=True - for item in obj: - if isFirst: - isFirst=False - else: - yield u"," - for part in dumpParts (item): - yield part - yield u']' - elif objType in [IntType, LongType, FloatType]: - yield unicode(obj) - else: - raise 
JSONEncodeException(obj) - - -def loads(s): - stack = [] - chars = iter(s) - value = None - currCharIsNext=False - - try: - while(1): - skip = False - if not currCharIsNext: - c = chars.next() - while(c in [' ', '\t', '\r','\n']): - c = chars.next() - currCharIsNext=False - if c=='"': - value = '' - try: - c=chars.next() - while c != '"': - if c == '\\': - c=chars.next() - try: - value+=EscapeCharToChar[c] - except KeyError: - if c == 'u': - hexCode = chars.next() + chars.next() + chars.next() + chars.next() - value += unichr(int(hexCode,16)) - else: - raise JSONDecodeException("Bad Escape Sequence Found") - else: - value+=c - c=chars.next() - except StopIteration: - raise JSONDecodeException("Expected end of String") - elif c == '{': - stack.append({}) - skip=True - elif c =='}': - value = stack.pop() - elif c == '[': - stack.append([]) - skip=True - elif c == ']': - value = stack.pop() - elif c in [',',':']: - skip=True - elif c in Digits or c == '-': - digits=[c] - c = chars.next() - numConv = int - try: - while c in Digits: - digits.append(c) - c = chars.next() - if c == ".": - numConv=float - digits.append(c) - c = chars.next() - while c in Digits: - digits.append(c) - c = chars.next() - if c.upper() == 'E': - digits.append(c) - c = chars.next() - if c in ['+','-']: - digits.append(c) - c = chars.next() - while c in Digits: - digits.append(c) - c = chars.next() - else: - raise JSONDecodeException("Expected + or -") - except StopIteration: - pass - value = numConv("".join(digits)) - currCharIsNext=True - - elif c in ['t','f','n']: - kw = c+ chars.next() + chars.next() + chars.next() - if kw == 'null': - value = None - elif kw == 'true': - value = True - elif kw == 'fals' and chars.next() == 'e': - value = False - else: - raise JSONDecodeException('Expected Null, False or True') - else: - raise JSONDecodeException('Expected []{}," or Number, Null, False or True') - - if not skip: - if len(stack): - top = stack[-1] - if type(top) is ListType: - top.append(value) - elif type(top) is DictionaryType: - stack.append(value) - elif type(top) in StringTypes: - key = stack.pop() - stack[-1][key] = value - else: - raise JSONDecodeException("Expected dictionary key, or start of a value") - else: - return value - except StopIteration: - raise JSONDecodeException("Unexpected end of JSON source") - - diff --git a/icarus-miner/data/usr/lib/python2.6/jsonrpc/json.pyc b/icarus-miner/data/usr/lib/python2.6/jsonrpc/json.pyc deleted file mode 100644 index b78fae3..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/jsonrpc/json.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/jsonrpc/modpywrapper.py b/icarus-miner/data/usr/lib/python2.6/jsonrpc/modpywrapper.py deleted file mode 100644 index 3e61017..0000000 --- a/icarus-miner/data/usr/lib/python2.6/jsonrpc/modpywrapper.py +++ /dev/null @@ -1,52 +0,0 @@ -import sys, os -from jsonrpc import ServiceHandler, ServiceException - - -class ServiceImplementaionNotFound(ServiceException): - pass - - -class ModPyServiceHandler(ServiceHandler): - def __init__(self, req): - self.req = req - ServiceHandler.__init__(self, None) - - - def findServiceEndpoint(self, name): - req = self.req - - (modulePath, fileName) = os.path.split(req.filename) - (moduleName, ext) = os.path.splitext(fileName) - - if not os.path.exists(os.path.join(modulePath, moduleName + ".py")): - raise ServiceImplementaionNotFound() - else: - if not modulePath in sys.path: - sys.path.insert(0, modulePath) - - from mod_python import apache - module = 
apache.import_module(moduleName, log=1) - - if hasattr(module, "service"): - self.service = module.service - elif hasattr(module, "Service"): - self.service = module.Service() - else: - self.service = module - - return ServiceHandler.findServiceEndpoint(self, name) - - - def handleRequest(self, data): - self.req.content_type = "text/plain" - data = self.req.read() - resultData = ServiceHandler.handleRequest(self, data) - self.req.write(resultData) - self.req.flush() - -def handler(req): - from mod_python import apache - ModPyServiceHandler(req).handleRequest(req) - return apache.OK - - diff --git a/icarus-miner/data/usr/lib/python2.6/jsonrpc/modpywrapper.pyc b/icarus-miner/data/usr/lib/python2.6/jsonrpc/modpywrapper.pyc deleted file mode 100644 index cb6109a..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/jsonrpc/modpywrapper.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/jsonrpc/proxy.py b/icarus-miner/data/usr/lib/python2.6/jsonrpc/proxy.py deleted file mode 100644 index e61fd13..0000000 --- a/icarus-miner/data/usr/lib/python2.6/jsonrpc/proxy.py +++ /dev/null @@ -1,49 +0,0 @@ - -""" - Copyright (c) 2007 Jan-Klaas Kollhof - - This file is part of jsonrpc. - - jsonrpc is free software; you can redistribute it and/or modify - it under the terms of the GNU Lesser General Public License as published by - the Free Software Foundation; either version 2.1 of the License, or - (at your option) any later version. - - This software is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public License - along with this software; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -""" - -import urllib -from jsonrpc.json import dumps, loads - -class JSONRPCException(Exception): - def __init__(self, rpcError): - Exception.__init__(self) - self.error = rpcError - -class ServiceProxy(object): - def __init__(self, serviceURL, serviceName=None): - self.__serviceURL = serviceURL - self.__serviceName = serviceName - - def __getattr__(self, name): - if self.__serviceName != None: - name = "%s.%s" % (self.__serviceName, name) - return ServiceProxy(self.__serviceURL, name) - - def __call__(self, *args): - postdata = dumps({"method": self.__serviceName, 'params': args, 'id':'jsonrpc'}) - respdata = urllib.urlopen(self.__serviceURL, postdata).read() - resp = loads(respdata) - if resp['error'] != None: - raise JSONRPCException(resp['error']) - else: - return resp['result'] - - diff --git a/icarus-miner/data/usr/lib/python2.6/jsonrpc/proxy.pyc b/icarus-miner/data/usr/lib/python2.6/jsonrpc/proxy.pyc deleted file mode 100644 index 35aa246..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/jsonrpc/proxy.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/jsonrpc/serviceHandler.py b/icarus-miner/data/usr/lib/python2.6/jsonrpc/serviceHandler.py deleted file mode 100644 index 1660113..0000000 --- a/icarus-miner/data/usr/lib/python2.6/jsonrpc/serviceHandler.py +++ /dev/null @@ -1,113 +0,0 @@ - -""" - Copyright (c) 2007 Jan-Klaas Kollhof - - This file is part of jsonrpc. 
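Note: because __getattr__ in the proxy above returns a fresh ServiceProxy with the name chained as "%s.%s", dotted JSON-RPC method names need no special casing. Illustration only; the endpoint and method are hypothetical::

    from jsonrpc import ServiceProxy

    s = ServiceProxy("http://127.0.0.1:8000/rpc")
    # attribute access only accumulates the name; the call itself POSTs
    # {"method": "wallet.getbalance", "params": [], "id": "jsonrpc"}
    balance = s.wallet.getbalance()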
- - jsonrpc is free software; you can redistribute it and/or modify - it under the terms of the GNU Lesser General Public License as published by - the Free Software Foundation; either version 2.1 of the License, or - (at your option) any later version. - - This software is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public License - along with this software; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -""" - -from jsonrpc import loads, dumps, JSONEncodeException - - -def ServiceMethod(fn): - fn.IsServiceMethod = True - return fn - -class ServiceException(Exception): - pass - -class ServiceRequestNotTranslatable(ServiceException): - pass - -class BadServiceRequest(ServiceException): - pass - -class ServiceMethodNotFound(ServiceException): - def __init__(self, name): - self.methodName=name - -class ServiceHandler(object): - - def __init__(self, service): - self.service=service - - def handleRequest(self, json): - err=None - result = None - id_='' - - try: - req = self.translateRequest(json) - except ServiceRequestNotTranslatable, e: - err = e - req={'id':id_} - - if err==None: - try: - id_ = req['id'] - methName = req['method'] - args = req['params'] - except: - err = BadServiceRequest(json) - - if err == None: - try: - meth = self.findServiceEndpoint(methName) - except Exception, e: - err = e - - if err == None: - try: - result = self.invokeServiceEndpoint(meth, args) - except Exception, e: - err = e - - resultdata = self.translateResult(result, err, id_) - - return resultdata - - def translateRequest(self, data): - try: - req = loads(data) - except: - raise ServiceRequestNotTranslatable(data) - return req - - def findServiceEndpoint(self, name): - try: - meth = getattr(self.service, name) - if getattr(meth, "IsServiceMethod"): - return meth - else: - raise ServiceMethodNotFound(name) - except AttributeError: - raise ServiceMethodNotFound(name) - - def invokeServiceEndpoint(self, meth, args): - return meth(*args) - - def translateResult(self, rslt, err, id_): - if err != None: - err = {"name": err.__class__.__name__, "message":err.message} - rslt = None - - try: - data = dumps({"result":rslt,"id":id_,"error":err}) - except JSONEncodeException, e: - err = {"name": "JSONEncodeException", "message":"Result Object Not Serializable"} - data = dumps({"result":None, "id":id_,"error":err}) - - return data \ No newline at end of file diff --git a/icarus-miner/data/usr/lib/python2.6/jsonrpc/serviceHandler.pyc b/icarus-miner/data/usr/lib/python2.6/jsonrpc/serviceHandler.pyc deleted file mode 100644 index eb83ac4..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/jsonrpc/serviceHandler.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/keyword.py b/icarus-miner/data/usr/lib/python2.6/keyword.py deleted file mode 100755 index 8eb2860..0000000 --- a/icarus-miner/data/usr/lib/python2.6/keyword.py +++ /dev/null @@ -1,93 +0,0 @@ -#! /usr/bin/env python - -"""Keywords (from "graminit.c") - -This file is automatically generated; please don't muck it up! 
- -To update the symbols in this file, 'cd' to the top directory of -the python source tree after building the interpreter and run: - - python Lib/keyword.py -""" - -__all__ = ["iskeyword", "kwlist"] - -kwlist = [ -#--start keywords-- - 'and', - 'as', - 'assert', - 'break', - 'class', - 'continue', - 'def', - 'del', - 'elif', - 'else', - 'except', - 'exec', - 'finally', - 'for', - 'from', - 'global', - 'if', - 'import', - 'in', - 'is', - 'lambda', - 'not', - 'or', - 'pass', - 'print', - 'raise', - 'return', - 'try', - 'while', - 'with', - 'yield', -#--end keywords-- - ] - -iskeyword = frozenset(kwlist).__contains__ - -def main(): - import sys, re - - args = sys.argv[1:] - iptfile = args and args[0] or "Python/graminit.c" - if len(args) > 1: optfile = args[1] - else: optfile = "Lib/keyword.py" - - # scan the source file for keywords - fp = open(iptfile) - strprog = re.compile('"([^"]+)"') - lines = [] - for line in fp: - if '{1, "' in line: - match = strprog.search(line) - if match: - lines.append(" '" + match.group(1) + "',\n") - fp.close() - lines.sort() - - # load the output skeleton from the target - fp = open(optfile) - format = fp.readlines() - fp.close() - - # insert the lines of keywords - try: - start = format.index("#--start keywords--\n") + 1 - end = format.index("#--end keywords--\n") - format[start:end] = lines - except ValueError: - sys.stderr.write("target does not contain format markers\n") - sys.exit(1) - - # write the output file - fp = open(optfile, 'w') - fp.write(''.join(format)) - fp.close() - -if __name__ == "__main__": - main() diff --git a/icarus-miner/data/usr/lib/python2.6/keyword.pyc b/icarus-miner/data/usr/lib/python2.6/keyword.pyc deleted file mode 100644 index 4172315..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/keyword.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/lib-dynload/__future__.py b/icarus-miner/data/usr/lib/python2.6/lib-dynload/__future__.py deleted file mode 100644 index 9156459..0000000 --- a/icarus-miner/data/usr/lib/python2.6/lib-dynload/__future__.py +++ /dev/null @@ -1,128 +0,0 @@ -"""Record of phased-in incompatible language changes. - -Each line is of the form: - - FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease "," - CompilerFlag ")" - -where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples -of the same form as sys.version_info: - - (PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int - PY_MINOR_VERSION, # the 1; an int - PY_MICRO_VERSION, # the 0; an int - PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string - PY_RELEASE_SERIAL # the 3; an int - ) - -OptionalRelease records the first release in which - - from __future__ import FeatureName - -was accepted. - -In the case of MandatoryReleases that have not yet occurred, -MandatoryRelease predicts the release in which the feature will become part -of the language. - -Else MandatoryRelease records when the feature became part of the language; -in releases at or after that, modules no longer need - - from __future__ import FeatureName - -to use the feature in question, but may continue to use such imports. - -MandatoryRelease may also be None, meaning that a planned feature got -dropped. - -Instances of class _Feature have two corresponding methods, -.getOptionalRelease() and .getMandatoryRelease(). - -CompilerFlag is the (bitfield) flag that should be passed in the fourth -argument to the builtin function compile() to enable the feature in -dynamically compiled code. 
This flag is stored in the .compiler_flag -attribute on _Future instances. These values must match the appropriate -#defines of CO_xxx flags in Include/compile.h. - -No feature line is ever to be deleted from this file. -""" - -all_feature_names = [ - "nested_scopes", - "generators", - "division", - "absolute_import", - "with_statement", - "print_function", - "unicode_literals", -] - -__all__ = ["all_feature_names"] + all_feature_names - -# The CO_xxx symbols are defined here under the same names used by -# compile.h, so that an editor search will find them here. However, -# they're not exported in __all__, because they don't really belong to -# this module. -CO_NESTED = 0x0010 # nested_scopes -CO_GENERATOR_ALLOWED = 0 # generators (obsolete, was 0x1000) -CO_FUTURE_DIVISION = 0x2000 # division -CO_FUTURE_ABSOLUTE_IMPORT = 0x4000 # perform absolute imports by default -CO_FUTURE_WITH_STATEMENT = 0x8000 # with statement -CO_FUTURE_PRINT_FUNCTION = 0x10000 # print function -CO_FUTURE_UNICODE_LITERALS = 0x20000 # unicode string literals - -class _Feature: - def __init__(self, optionalRelease, mandatoryRelease, compiler_flag): - self.optional = optionalRelease - self.mandatory = mandatoryRelease - self.compiler_flag = compiler_flag - - def getOptionalRelease(self): - """Return first release in which this feature was recognized. - - This is a 5-tuple, of the same form as sys.version_info. - """ - - return self.optional - - def getMandatoryRelease(self): - """Return release in which this feature will become mandatory. - - This is a 5-tuple, of the same form as sys.version_info, or, if - the feature was dropped, is None. - """ - - return self.mandatory - - def __repr__(self): - return "_Feature" + repr((self.optional, - self.mandatory, - self.compiler_flag)) - -nested_scopes = _Feature((2, 1, 0, "beta", 1), - (2, 2, 0, "alpha", 0), - CO_NESTED) - -generators = _Feature((2, 2, 0, "alpha", 1), - (2, 3, 0, "final", 0), - CO_GENERATOR_ALLOWED) - -division = _Feature((2, 2, 0, "alpha", 2), - (3, 0, 0, "alpha", 0), - CO_FUTURE_DIVISION) - -absolute_import = _Feature((2, 5, 0, "alpha", 1), - (2, 7, 0, "alpha", 0), - CO_FUTURE_ABSOLUTE_IMPORT) - -with_statement = _Feature((2, 5, 0, "alpha", 1), - (2, 6, 0, "alpha", 0), - CO_FUTURE_WITH_STATEMENT) - -print_function = _Feature((2, 6, 0, "alpha", 2), - (3, 0, 0, "alpha", 0), - CO_FUTURE_PRINT_FUNCTION) - -unicode_literals = _Feature((2, 6, 0, "alpha", 2), - (3, 0, 0, "alpha", 0), - CO_FUTURE_UNICODE_LITERALS) diff --git a/icarus-miner/data/usr/lib/python2.6/lib-dynload/__future__.pyc b/icarus-miner/data/usr/lib/python2.6/lib-dynload/__future__.pyc deleted file mode 100644 index f423d02..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/lib-dynload/__future__.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/lib-dynload/_bisect.so b/icarus-miner/data/usr/lib/python2.6/lib-dynload/_bisect.so deleted file mode 100755 index 95006ca..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/lib-dynload/_bisect.so and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/lib-dynload/_collections.so b/icarus-miner/data/usr/lib/python2.6/lib-dynload/_collections.so deleted file mode 100755 index 1dd4c37..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/lib-dynload/_collections.so and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/lib-dynload/_functools.so b/icarus-miner/data/usr/lib/python2.6/lib-dynload/_functools.so deleted file mode 100755 index e5dd89e..0000000 Binary files 
a/icarus-miner/data/usr/lib/python2.6/lib-dynload/_functools.so and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/lib-dynload/_random.so b/icarus-miner/data/usr/lib/python2.6/lib-dynload/_random.so deleted file mode 100755 index 1c8dfb3..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/lib-dynload/_random.so and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/lib-dynload/_socket.so b/icarus-miner/data/usr/lib/python2.6/lib-dynload/_socket.so deleted file mode 100755 index ddc2783..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/lib-dynload/_socket.so and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/lib-dynload/_struct.so b/icarus-miner/data/usr/lib/python2.6/lib-dynload/_struct.so deleted file mode 100755 index 4958b7d..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/lib-dynload/_struct.so and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/lib-dynload/array.so b/icarus-miner/data/usr/lib/python2.6/lib-dynload/array.so deleted file mode 100755 index a872457..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/lib-dynload/array.so and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/lib-dynload/binascii.so b/icarus-miner/data/usr/lib/python2.6/lib-dynload/binascii.so deleted file mode 100755 index 9c13483..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/lib-dynload/binascii.so and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/lib-dynload/fcntl.so b/icarus-miner/data/usr/lib/python2.6/lib-dynload/fcntl.so deleted file mode 100755 index f88e6a1..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/lib-dynload/fcntl.so and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/lib-dynload/itertools.so b/icarus-miner/data/usr/lib/python2.6/lib-dynload/itertools.so deleted file mode 100755 index 7c1c353..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/lib-dynload/itertools.so and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/lib-dynload/math.so b/icarus-miner/data/usr/lib/python2.6/lib-dynload/math.so deleted file mode 100755 index 47493b3..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/lib-dynload/math.so and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/lib-dynload/operator.so b/icarus-miner/data/usr/lib/python2.6/lib-dynload/operator.so deleted file mode 100755 index 6c327f2..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/lib-dynload/operator.so and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/lib-dynload/random.py b/icarus-miner/data/usr/lib/python2.6/lib-dynload/random.py deleted file mode 100644 index 568de88..0000000 --- a/icarus-miner/data/usr/lib/python2.6/lib-dynload/random.py +++ /dev/null @@ -1,898 +0,0 @@ -"""Random variable generators. - - integers - -------- - uniform within range - - sequences - --------- - pick random element - pick random sample - generate random permutation - - distributions on the real line: - ------------------------------ - uniform - triangular - normal (Gaussian) - lognormal - negative exponential - gamma - beta - pareto - Weibull - - distributions on the circle (angles 0 to 2pi) - --------------------------------------------- - circular uniform - von Mises - -General notes on the underlying Mersenne Twister core generator: - -* The period is 2**19937-1. -* It is one of the most extensively tested generators in existence. 
-* Without a direct way to compute N steps forward, the semantics of - jumpahead(n) are weakened to simply jump to another distant state and rely - on the large period to avoid overlapping sequences. -* The random() method is implemented in C, executes in a single Python step, - and is, therefore, threadsafe. - -""" - -from __future__ import division -from warnings import warn as _warn -from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType -from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil -from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin -from os import urandom as _urandom -from binascii import hexlify as _hexlify - -__all__ = ["Random","seed","random","uniform","randint","choice","sample", - "randrange","shuffle","normalvariate","lognormvariate", - "expovariate","vonmisesvariate","gammavariate","triangular", - "gauss","betavariate","paretovariate","weibullvariate", - "getstate","setstate","jumpahead", "WichmannHill", "getrandbits", - "SystemRandom"] - -NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0) -TWOPI = 2.0*_pi -LOG4 = _log(4.0) -SG_MAGICCONST = 1.0 + _log(4.5) -BPF = 53 # Number of bits in a float -RECIP_BPF = 2**-BPF - - -# Translated by Guido van Rossum from C source provided by -# Adrian Baddeley. Adapted by Raymond Hettinger for use with -# the Mersenne Twister and os.urandom() core generators. - -import _random - -class Random(_random.Random): - """Random number generator base class used by bound module functions. - - Used to instantiate instances of Random to get generators that don't - share state. Especially useful for multi-threaded programs, creating - a different instance of Random for each thread, and using the jumpahead() - method to ensure that the generated sequences seen by each thread don't - overlap. - - Class Random can also be subclassed if you want to use a different basic - generator of your own devising: in that case, override the following - methods: random(), seed(), getstate(), setstate() and jumpahead(). - Optionally, implement a getrandbits() method so that randrange() can cover - arbitrarily large ranges. - - """ - - VERSION = 3 # used by getstate/setstate - - def __init__(self, x=None): - """Initialize an instance. - - Optional argument x controls seeding, as for Random.seed(). - """ - - self.seed(x) - self.gauss_next = None - - def seed(self, a=None): - """Initialize internal state from hashable object. - - None or no argument seeds from current time or from an operating - system specific randomness source if available. - - If a is not None or an int or long, hash(a) is used instead. - """ - - if a is None: - try: - a = long(_hexlify(_urandom(16)), 16) - except NotImplementedError: - import time - a = long(time.time() * 256) # use fractional seconds - - super(Random, self).seed(a) - self.gauss_next = None - - def getstate(self): - """Return internal state; can be passed to setstate() later.""" - return self.VERSION, super(Random, self).getstate(), self.gauss_next - - def setstate(self, state): - """Restore internal state from object returned by getstate().""" - version = state[0] - if version == 3: - version, internalstate, self.gauss_next = state - super(Random, self).setstate(internalstate) - elif version == 2: - version, internalstate, self.gauss_next = state - # In version 2, the state was saved as signed ints, which causes - # inconsistencies between 32/64-bit systems. 
The state is - # really unsigned 32-bit ints, so we convert negative ints from - # version 2 to positive longs for version 3. - try: - internalstate = tuple( long(x) % (2**32) for x in internalstate ) - except ValueError, e: - raise TypeError, e - super(Random, self).setstate(internalstate) - else: - raise ValueError("state with version %s passed to " - "Random.setstate() of version %s" % - (version, self.VERSION)) - -## ---- Methods below this point do not need to be overridden when -## ---- subclassing for the purpose of using a different core generator. - -## -------------------- pickle support ------------------- - - def __getstate__(self): # for pickle - return self.getstate() - - def __setstate__(self, state): # for pickle - self.setstate(state) - - def __reduce__(self): - return self.__class__, (), self.getstate() - -## -------------------- integer methods ------------------- - - def randrange(self, start, stop=None, step=1, int=int, default=None, - maxwidth=1L<<BPF): - """Choose a random item from range(start, stop[, step]). - - This fixes the problem with randint() which includes the - endpoint; in Python this is usually not what you want. - Do not supply the 'int', 'default', and 'maxwidth' arguments. - """ - - # This code is a bit messy to make it fast for the - # common case while still doing adequate error checking. - istart = int(start) - if istart != start: - raise ValueError, "non-integer arg 1 for randrange()" - if stop is default: - if istart > 0: - if istart >= maxwidth: - return self._randbelow(istart) - return int(self.random() * istart) - raise ValueError, "empty range for randrange()" - - # stop argument supplied. - istop = int(stop) - if istop != stop: - raise ValueError, "non-integer stop for randrange()" - width = istop - istart - if step == 1 and width > 0: - # Note that - # int(istart + self.random()*width) - # instead would be incorrect. For example, consider istart - # = -2 and istop = 0. Then the guts would be in - # -2.0 to 0.0 exclusive on both ends (ignoring that random() - # might return 0.0), and because int() truncates toward 0, the - # final result would be -1 or 0 (instead of -2 or -1). - # istart + int(self.random()*width) - # would also be incorrect, for a subtler reason: the RHS - # can return a long, and then randrange() would also return - # a long, but we're supposed to return an int (for backward - # compatibility). - - if width >= maxwidth: - return int(istart + self._randbelow(width)) - return int(istart + int(self.random()*width)) - if step == 1: - raise ValueError, "empty range for randrange() (%d,%d, %d)" % (istart, istop, width) - - # Non-unit step argument supplied. - istep = int(step) - if istep != step: - raise ValueError, "non-integer step for randrange()" - if istep > 0: - n = (width + istep - 1) // istep - elif istep < 0: - n = (width + istep + 1) // istep - else: - raise ValueError, "zero step for randrange()" - - if n <= 0: - raise ValueError, "empty range for randrange()" - - if n >= maxwidth: - return istart + istep*self._randbelow(n) - return istart + istep*int(self.random() * n) - - def randint(self, a, b): - """Return random integer in range [a, b], including both end points. - """ - - return self.randrange(a, b+1) - - def _randbelow(self, n, _log=_log, int=int, _maxwidth=1L<<BPF, - _Method=_MethodType, _BuiltinMethod=_BuiltinMethodType): - """Return a random int in the range [0,n) - - Handles the case where n has more bits than returned - by a single call to the underlying generator. - """ - - try: - getrandbits = self.getrandbits - except AttributeError: - pass - else: - # Only call self.getrandbits if the original random() builtin method - # has not been overridden or if a new getrandbits() was supplied. - # This assures that the two methods correspond. - if type(self.random) is _BuiltinMethod or type(getrandbits) is _Method: - k = int(1.00001 + _log(n-1, 2.0)) # 2**k > n-1 > 2**(k-2) - r = getrandbits(k) - while r >= n: - r = getrandbits(k) - return r - if n >= _maxwidth: - _warn("Underlying random() generator does not supply \n" - "enough bits to choose from a population range this large") - return int(self.random() * n) - -## -------------------- sequence methods ------------------- - - def choice(self, seq): - """Choose a random element from a non-empty sequence.""" - return seq[int(self.random() * len(seq))] # raises IndexError if seq is empty - - def shuffle(self, x, random=None, int=int): - """x, random=random.random -> shuffle list x in place; return None. - - Optional arg random is a 0-argument function returning a random - float in [0.0, 1.0); by default, the standard random.random.
- """ - - if random is None: - random = self.random - for i in reversed(xrange(1, len(x))): - # pick an element in x[:i+1] with which to exchange x[i] - j = int(random() * (i+1)) - x[i], x[j] = x[j], x[i] - - def sample(self, population, k): - """Chooses k unique random elements from a population sequence. - - Returns a new list containing elements from the population while - leaving the original population unchanged. The resulting list is - in selection order so that all sub-slices will also be valid random - samples. This allows raffle winners (the sample) to be partitioned - into grand prize and second place winners (the subslices). - - Members of the population need not be hashable or unique. If the - population contains repeats, then each occurrence is a possible - selection in the sample. - - To choose a sample in a range of integers, use xrange as an argument. - This is especially fast and space efficient for sampling from a - large population: sample(xrange(10000000), 60) - """ - - # XXX Although the documentation says `population` is "a sequence", - # XXX attempts are made to cater to any iterable with a __len__ - # XXX method. This has had mixed success. Examples from both - # XXX sides: sets work fine, and should become officially supported; - # XXX dicts are much harder, and have failed in various subtle - # XXX ways across attempts. Support for mapping types should probably - # XXX be dropped (and users should pass mapping.keys() or .values() - # XXX explicitly). - - # Sampling without replacement entails tracking either potential - # selections (the pool) in a list or previous selections in a set. - - # When the number of selections is small compared to the - # population, then tracking selections is efficient, requiring - # only a small set and an occasional reselection. For - # a larger number of selections, the pool tracking method is - # preferred since the list takes less space than the - # set and it doesn't suffer from frequent reselections. - - n = len(population) - if not 0 <= k <= n: - raise ValueError, "sample larger than population" - random = self.random - _int = int - result = [None] * k - setsize = 21 # size of a small set minus size of an empty list - if k > 5: - setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets - if n <= setsize or hasattr(population, "keys"): - # An n-length list is smaller than a k-length set, or this is a - # mapping type so the other algorithm wouldn't work. - pool = list(population) - for i in xrange(k): # invariant: non-selected at [0,n-i) - j = _int(random() * (n-i)) - result[i] = pool[j] - pool[j] = pool[n-i-1] # move non-selected item into vacancy - else: - try: - selected = set() - selected_add = selected.add - for i in xrange(k): - j = _int(random() * n) - while j in selected: - j = _int(random() * n) - selected_add(j) - result[i] = population[j] - except (TypeError, KeyError): # handle (at least) sets - if isinstance(population, list): - raise - return self.sample(tuple(population), k) - return result - -## -------------------- real-valued distributions ------------------- - -## -------------------- uniform distribution ------------------- - - def uniform(self, a, b): - "Get a random number in the range [a, b) or [a, b] depending on rounding." - return a + (b-a) * self.random() - -## -------------------- triangular -------------------- - - def triangular(self, low=0.0, high=1.0, mode=None): - """Triangular distribution. 
- - Continuous distribution bounded by given lower and upper limits, - and having a given mode value in-between. - - http://en.wikipedia.org/wiki/Triangular_distribution - - """ - u = self.random() - c = 0.5 if mode is None else (mode - low) / (high - low) - if u > c: - u = 1.0 - u - c = 1.0 - c - low, high = high, low - return low + (high - low) * (u * c) ** 0.5 - -## -------------------- normal distribution -------------------- - - def normalvariate(self, mu, sigma): - """Normal distribution. - - mu is the mean, and sigma is the standard deviation. - - """ - # mu = mean, sigma = standard deviation - - # Uses Kinderman and Monahan method. Reference: Kinderman, - # A.J. and Monahan, J.F., "Computer generation of random - # variables using the ratio of uniform deviates", ACM Trans - # Math Software, 3, (1977), pp257-260. - - random = self.random - while 1: - u1 = random() - u2 = 1.0 - random() - z = NV_MAGICCONST*(u1-0.5)/u2 - zz = z*z/4.0 - if zz <= -_log(u2): - break - return mu + z*sigma - -## -------------------- lognormal distribution -------------------- - - def lognormvariate(self, mu, sigma): - """Log normal distribution. - - If you take the natural logarithm of this distribution, you'll get a - normal distribution with mean mu and standard deviation sigma. - mu can have any value, and sigma must be greater than zero. - - """ - return _exp(self.normalvariate(mu, sigma)) - -## -------------------- exponential distribution -------------------- - - def expovariate(self, lambd): - """Exponential distribution. - - lambd is 1.0 divided by the desired mean. It should be - nonzero. (The parameter would be called "lambda", but that is - a reserved word in Python.) Returned values range from 0 to - positive infinity if lambd is positive, and from negative - infinity to 0 if lambd is negative. - - """ - # lambd: rate lambd = 1/mean - # ('lambda' is a Python reserved word) - - random = self.random - u = random() - while u <= 1e-7: - u = random() - return -_log(u)/lambd - -## -------------------- von Mises distribution -------------------- - - def vonmisesvariate(self, mu, kappa): - """Circular data distribution. - - mu is the mean angle, expressed in radians between 0 and 2*pi, and - kappa is the concentration parameter, which must be greater than or - equal to zero. If kappa is equal to zero, this distribution reduces - to a uniform random angle over the range 0 to 2*pi. - - """ - # mu: mean angle (in radians between 0 and 2*pi) - # kappa: concentration parameter kappa (>= 0) - # if kappa = 0 generate uniform random angle - - # Based upon an algorithm published in: Fisher, N.I., - # "Statistical Analysis of Circular Data", Cambridge - # University Press, 1993. - - # Thanks to Magnus Kessler for a correction to the - # implementation of step 4. - - random = self.random - if kappa <= 1e-6: - return TWOPI * random() - - a = 1.0 + _sqrt(1.0 + 4.0 * kappa * kappa) - b = (a - _sqrt(2.0 * a))/(2.0 * kappa) - r = (1.0 + b * b)/(2.0 * b) - - while 1: - u1 = random() - - z = _cos(_pi * u1) - f = (1.0 + r * z)/(r + z) - c = kappa * (r - f) - - u2 = random() - - if u2 < c * (2.0 - c) or u2 <= c * _exp(1.0 - c): - break - - u3 = random() - if u3 > 0.5: - theta = (mu % TWOPI) + _acos(f) - else: - theta = (mu % TWOPI) - _acos(f) - - return theta - -## -------------------- gamma distribution -------------------- - - def gammavariate(self, alpha, beta): - """Gamma distribution. Not the gamma function! - - Conditions on the parameters are alpha > 0 and beta > 0. 
- - """ - - # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2 - - # Warning: a few older sources define the gamma distribution in terms - # of alpha > -1.0 - if alpha <= 0.0 or beta <= 0.0: - raise ValueError, 'gammavariate: alpha and beta must be > 0.0' - - random = self.random - if alpha > 1.0: - - # Uses R.C.H. Cheng, "The generation of Gamma - # variables with non-integral shape parameters", - # Applied Statistics, (1977), 26, No. 1, p71-74 - - ainv = _sqrt(2.0 * alpha - 1.0) - bbb = alpha - LOG4 - ccc = alpha + ainv - - while 1: - u1 = random() - if not 1e-7 < u1 < .9999999: - continue - u2 = 1.0 - random() - v = _log(u1/(1.0-u1))/ainv - x = alpha*_exp(v) - z = u1*u1*u2 - r = bbb+ccc*v-x - if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z): - return x * beta - - elif alpha == 1.0: - # expovariate(1) - u = random() - while u <= 1e-7: - u = random() - return -_log(u) * beta - - else: # alpha is between 0 and 1 (exclusive) - - # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle - - while 1: - u = random() - b = (_e + alpha)/_e - p = b*u - if p <= 1.0: - x = p ** (1.0/alpha) - else: - x = -_log((b-p)/alpha) - u1 = random() - if p > 1.0: - if u1 <= x ** (alpha - 1.0): - break - elif u1 <= _exp(-x): - break - return x * beta - -## -------------------- Gauss (faster alternative) -------------------- - - def gauss(self, mu, sigma): - """Gaussian distribution. - - mu is the mean, and sigma is the standard deviation. This is - slightly faster than the normalvariate() function. - - Not thread-safe without a lock around calls. - - """ - - # When x and y are two variables from [0, 1), uniformly - # distributed, then - # - # cos(2*pi*x)*sqrt(-2*log(1-y)) - # sin(2*pi*x)*sqrt(-2*log(1-y)) - # - # are two *independent* variables with normal distribution - # (mu = 0, sigma = 1). - # (Lambert Meertens) - # (corrected version; bug discovered by Mike Miller, fixed by LM) - - # Multithreading note: When two threads call this function - # simultaneously, it is possible that they will receive the - # same return value. The window is very small though. To - # avoid this, you have to use a lock around all calls. (I - # didn't want to slow this down in the serial case by using a - # lock here.) - - random = self.random - z = self.gauss_next - self.gauss_next = None - if z is None: - x2pi = random() * TWOPI - g2rad = _sqrt(-2.0 * _log(1.0 - random())) - z = _cos(x2pi) * g2rad - self.gauss_next = _sin(x2pi) * g2rad - - return mu + z*sigma - -## -------------------- beta -------------------- -## See -## http://sourceforge.net/bugs/?func=detailbug&bug_id=130030&group_id=5470 -## for Ivan Frohne's insightful analysis of why the original implementation: -## -## def betavariate(self, alpha, beta): -## # Discrete Event Simulation in C, pp 87-88. -## -## y = self.expovariate(alpha) -## z = self.expovariate(1.0/beta) -## return z/(y+z) -## -## was dead wrong, and how it probably got that way. - - def betavariate(self, alpha, beta): - """Beta distribution. - - Conditions on the parameters are alpha > 0 and beta > 0. - Returned values range between 0 and 1. - - """ - - # This version due to Janne Sinkkonen, and matches all the std - # texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution"). - y = self.gammavariate(alpha, 1.) - if y == 0: - return 0.0 - else: - return y / (y + self.gammavariate(beta, 1.)) - -## -------------------- Pareto -------------------- - - def paretovariate(self, alpha): - """Pareto distribution. alpha is the shape parameter.""" - # Jain, pg. 
495 - - u = 1.0 - self.random() - return 1.0 / pow(u, 1.0/alpha) - -## -------------------- Weibull -------------------- - - def weibullvariate(self, alpha, beta): - """Weibull distribution. - - alpha is the scale parameter and beta is the shape parameter. - - """ - # Jain, pg. 499; bug fix courtesy Bill Arms - - u = 1.0 - self.random() - return alpha * pow(-_log(u), 1.0/beta) - -## -------------------- Wichmann-Hill ------------------- - -class WichmannHill(Random): - - VERSION = 1 # used by getstate/setstate - - def seed(self, a=None): - """Initialize internal state from hashable object. - - None or no argument seeds from current time or from an operating - system specific randomness source if available. - - If a is not None or an int or long, hash(a) is used instead. - - If a is an int or long, a is used directly. Distinct values between - 0 and 27814431486575L inclusive are guaranteed to yield distinct - internal states (this guarantee is specific to the default - Wichmann-Hill generator). - """ - - if a is None: - try: - a = long(_hexlify(_urandom(16)), 16) - except NotImplementedError: - import time - a = long(time.time() * 256) # use fractional seconds - - if not isinstance(a, (int, long)): - a = hash(a) - - a, x = divmod(a, 30268) - a, y = divmod(a, 30306) - a, z = divmod(a, 30322) - self._seed = int(x)+1, int(y)+1, int(z)+1 - - self.gauss_next = None - - def random(self): - """Get the next random number in the range [0.0, 1.0).""" - - # Wichman-Hill random number generator. - # - # Wichmann, B. A. & Hill, I. D. (1982) - # Algorithm AS 183: - # An efficient and portable pseudo-random number generator - # Applied Statistics 31 (1982) 188-190 - # - # see also: - # Correction to Algorithm AS 183 - # Applied Statistics 33 (1984) 123 - # - # McLeod, A. I. (1985) - # A remark on Algorithm AS 183 - # Applied Statistics 34 (1985),198-200 - - # This part is thread-unsafe: - # BEGIN CRITICAL SECTION - x, y, z = self._seed - x = (171 * x) % 30269 - y = (172 * y) % 30307 - z = (170 * z) % 30323 - self._seed = x, y, z - # END CRITICAL SECTION - - # Note: on a platform using IEEE-754 double arithmetic, this can - # never return 0.0 (asserted by Tim; proof too long for a comment). - return (x/30269.0 + y/30307.0 + z/30323.0) % 1.0 - - def getstate(self): - """Return internal state; can be passed to setstate() later.""" - return self.VERSION, self._seed, self.gauss_next - - def setstate(self, state): - """Restore internal state from object returned by getstate().""" - version = state[0] - if version == 1: - version, self._seed, self.gauss_next = state - else: - raise ValueError("state with version %s passed to " - "Random.setstate() of version %s" % - (version, self.VERSION)) - - def jumpahead(self, n): - """Act as if n calls to random() were made, but quickly. - - n is an int, greater than or equal to 0. - - Example use: If you have 2 threads and know that each will - consume no more than a million random numbers, create two Random - objects r1 and r2, then do - r2.setstate(r1.getstate()) - r2.jumpahead(1000000) - Then r1 and r2 will use guaranteed-disjoint segments of the full - period. - """ - - if not n >= 0: - raise ValueError("n must be >= 0") - x, y, z = self._seed - x = int(x * pow(171, n, 30269)) % 30269 - y = int(y * pow(172, n, 30307)) % 30307 - z = int(z * pow(170, n, 30323)) % 30323 - self._seed = x, y, z - - def __whseed(self, x=0, y=0, z=0): - """Set the Wichmann-Hill seed from (x, y, z). - - These must be integers in the range [0, 256). 
- """ - - if not type(x) == type(y) == type(z) == int: - raise TypeError('seeds must be integers') - if not (0 <= x < 256 and 0 <= y < 256 and 0 <= z < 256): - raise ValueError('seeds must be in range(0, 256)') - if 0 == x == y == z: - # Initialize from current time - import time - t = long(time.time() * 256) - t = int((t&0xffffff) ^ (t>>24)) - t, x = divmod(t, 256) - t, y = divmod(t, 256) - t, z = divmod(t, 256) - # Zero is a poor seed, so substitute 1 - self._seed = (x or 1, y or 1, z or 1) - - self.gauss_next = None - - def whseed(self, a=None): - """Seed from hashable object's hash code. - - None or no argument seeds from current time. It is not guaranteed - that objects with distinct hash codes lead to distinct internal - states. - - This is obsolete, provided for compatibility with the seed routine - used prior to Python 2.1. Use the .seed() method instead. - """ - - if a is None: - self.__whseed() - return - a = hash(a) - a, x = divmod(a, 256) - a, y = divmod(a, 256) - a, z = divmod(a, 256) - x = (x + a) % 256 or 1 - y = (y + a) % 256 or 1 - z = (z + a) % 256 or 1 - self.__whseed(x, y, z) - -## --------------- Operating System Random Source ------------------ - -class SystemRandom(Random): - """Alternate random number generator using sources provided - by the operating system (such as /dev/urandom on Unix or - CryptGenRandom on Windows). - - Not available on all systems (see os.urandom() for details). - """ - - def random(self): - """Get the next random number in the range [0.0, 1.0).""" - return (long(_hexlify(_urandom(7)), 16) >> 3) * RECIP_BPF - - def getrandbits(self, k): - """getrandbits(k) -> x. Generates a long int with k random bits.""" - if k <= 0: - raise ValueError('number of bits must be greater than zero') - if k != int(k): - raise TypeError('number of bits should be an integer') - bytes = (k + 7) // 8 # bits / 8 and rounded up - x = long(_hexlify(_urandom(bytes)), 16) - return x >> (bytes * 8 - k) # trim excess bits - - def _stub(self, *args, **kwds): - "Stub method. Not used for a system random number generator." - return None - seed = jumpahead = _stub - - def _notimplemented(self, *args, **kwds): - "Method should not be called for a system random number generator." 
- raise NotImplementedError('System entropy source does not have state.') - getstate = setstate = _notimplemented - -## -------------------- test program -------------------- - -def _test_generator(n, func, args): - import time - print n, 'times', func.__name__ - total = 0.0 - sqsum = 0.0 - smallest = 1e10 - largest = -1e10 - t0 = time.time() - for i in range(n): - x = func(*args) - total += x - sqsum = sqsum + x*x - smallest = min(x, smallest) - largest = max(x, largest) - t1 = time.time() - print round(t1-t0, 3), 'sec,', - avg = total/n - stddev = _sqrt(sqsum/n - avg*avg) - print 'avg %g, stddev %g, min %g, max %g' % \ - (avg, stddev, smallest, largest) - - -def _test(N=2000): - _test_generator(N, random, ()) - _test_generator(N, normalvariate, (0.0, 1.0)) - _test_generator(N, lognormvariate, (0.0, 1.0)) - _test_generator(N, vonmisesvariate, (0.0, 1.0)) - _test_generator(N, gammavariate, (0.01, 1.0)) - _test_generator(N, gammavariate, (0.1, 1.0)) - _test_generator(N, gammavariate, (0.1, 2.0)) - _test_generator(N, gammavariate, (0.5, 1.0)) - _test_generator(N, gammavariate, (0.9, 1.0)) - _test_generator(N, gammavariate, (1.0, 1.0)) - _test_generator(N, gammavariate, (2.0, 1.0)) - _test_generator(N, gammavariate, (20.0, 1.0)) - _test_generator(N, gammavariate, (200.0, 1.0)) - _test_generator(N, gauss, (0.0, 1.0)) - _test_generator(N, betavariate, (3.0, 3.0)) - _test_generator(N, triangular, (0.0, 1.0, 1.0/3.0)) - -# Create one instance, seeded from current time, and export its methods -# as module-level functions. The functions share state across all uses -#(both in the user's code and in the Python libraries), but that's fine -# for most programs and is easier for the casual user than making them -# instantiate their own Random() instance. - -_inst = Random() -seed = _inst.seed -random = _inst.random -uniform = _inst.uniform -triangular = _inst.triangular -randint = _inst.randint -choice = _inst.choice -randrange = _inst.randrange -sample = _inst.sample -shuffle = _inst.shuffle -normalvariate = _inst.normalvariate -lognormvariate = _inst.lognormvariate -expovariate = _inst.expovariate -vonmisesvariate = _inst.vonmisesvariate -gammavariate = _inst.gammavariate -gauss = _inst.gauss -betavariate = _inst.betavariate -paretovariate = _inst.paretovariate -weibullvariate = _inst.weibullvariate -getstate = _inst.getstate -setstate = _inst.setstate -jumpahead = _inst.jumpahead -getrandbits = _inst.getrandbits - -if __name__ == '__main__': - _test() diff --git a/icarus-miner/data/usr/lib/python2.6/lib-dynload/random.pyc b/icarus-miner/data/usr/lib/python2.6/lib-dynload/random.pyc deleted file mode 100644 index 08d9147..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/lib-dynload/random.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/lib-dynload/select.so b/icarus-miner/data/usr/lib/python2.6/lib-dynload/select.so deleted file mode 100755 index 476475c..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/lib-dynload/select.so and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/lib-dynload/tempfile.py b/icarus-miner/data/usr/lib/python2.6/lib-dynload/tempfile.py deleted file mode 100644 index b0fd87d..0000000 --- a/icarus-miner/data/usr/lib/python2.6/lib-dynload/tempfile.py +++ /dev/null @@ -1,603 +0,0 @@ -"""Temporary files. - -This module provides generic, low- and high-level interfaces for -creating temporary files and directories. The interfaces listed -as "safe" just below can be used without fear of race conditions. 
-Those listed as "unsafe" cannot, and are provided for backward -compatibility only. - -This module also provides some data items to the user: - - TMP_MAX - maximum number of names that will be tried before - giving up. - template - the default prefix for all temporary names. - You may change this to control the default prefix. - tempdir - If this is set to a string before the first use of - any routine from this module, it will be considered as - another candidate location to store temporary files. -""" - -__all__ = [ - "NamedTemporaryFile", "TemporaryFile", # high level safe interfaces - "SpooledTemporaryFile", - "mkstemp", "mkdtemp", # low level safe interfaces - "mktemp", # deprecated unsafe interface - "TMP_MAX", "gettempprefix", # constants - "tempdir", "gettempdir" - ] - - -# Imports. - -import os as _os -import errno as _errno -from random import Random as _Random - -try: - from cStringIO import StringIO as _StringIO -except ImportError: - from StringIO import StringIO as _StringIO - -try: - import fcntl as _fcntl -except ImportError: - def _set_cloexec(fd): - pass -else: - def _set_cloexec(fd): - try: - flags = _fcntl.fcntl(fd, _fcntl.F_GETFD, 0) - except IOError: - pass - else: - # flags read successfully, modify - flags |= _fcntl.FD_CLOEXEC - _fcntl.fcntl(fd, _fcntl.F_SETFD, flags) - - -try: - import thread as _thread -except ImportError: - import dummy_thread as _thread -_allocate_lock = _thread.allocate_lock - -_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL -if hasattr(_os, 'O_NOINHERIT'): - _text_openflags |= _os.O_NOINHERIT -if hasattr(_os, 'O_NOFOLLOW'): - _text_openflags |= _os.O_NOFOLLOW - -_bin_openflags = _text_openflags -if hasattr(_os, 'O_BINARY'): - _bin_openflags |= _os.O_BINARY - -if hasattr(_os, 'TMP_MAX'): - TMP_MAX = _os.TMP_MAX -else: - TMP_MAX = 10000 - -template = "tmp" - -# Internal routines. - -_once_lock = _allocate_lock() - -if hasattr(_os, "lstat"): - _stat = _os.lstat -elif hasattr(_os, "stat"): - _stat = _os.stat -else: - # Fallback. All we need is something that raises os.error if the - # file doesn't exist. - def _stat(fn): - try: - f = open(fn) - except IOError: - raise _os.error - f.close() - -def _exists(fn): - try: - _stat(fn) - except _os.error: - return False - else: - return True - -class _RandomNameSequence: - """An instance of _RandomNameSequence generates an endless - sequence of unpredictable strings which can safely be incorporated - into file names. Each string is six characters long. Multiple - threads can safely use the same instance at the same time. - - _RandomNameSequence is an iterator.""" - - characters = ("abcdefghijklmnopqrstuvwxyz" + - "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + - "0123456789_") - - def __init__(self): - self.mutex = _allocate_lock() - self.rng = _Random() - self.normcase = _os.path.normcase - - def __iter__(self): - return self - - def next(self): - m = self.mutex - c = self.characters - choose = self.rng.choice - - m.acquire() - try: - letters = [choose(c) for dummy in "123456"] - finally: - m.release() - - return self.normcase(''.join(letters)) - -def _candidate_tempdir_list(): - """Generate a list of candidate temporary directories which - _get_default_tempdir will try.""" - - dirlist = [] - - # First, try the environment. - for envname in 'TMPDIR', 'TEMP', 'TMP': - dirname = _os.getenv(envname) - if dirname: dirlist.append(dirname) - - # Failing that, try OS-specific locations. 
- if _os.name == 'riscos': - dirname = _os.getenv('Wimp$ScrapDir') - if dirname: dirlist.append(dirname) - elif _os.name == 'nt': - dirlist.extend([ r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ]) - else: - dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ]) - - # As a last resort, the current directory. - try: - dirlist.append(_os.getcwd()) - except (AttributeError, _os.error): - dirlist.append(_os.curdir) - - return dirlist - -def _get_default_tempdir(): - """Calculate the default directory to use for temporary files. - This routine should be called exactly once. - - We determine whether or not a candidate temp dir is usable by - trying to create and write to a file in that directory. If this - is successful, the test file is deleted. To prevent denial of - service, the name of the test file must be randomized.""" - - namer = _RandomNameSequence() - dirlist = _candidate_tempdir_list() - flags = _text_openflags - - for dir in dirlist: - if dir != _os.curdir: - dir = _os.path.normcase(_os.path.abspath(dir)) - # Try only a few names per directory. - for seq in xrange(100): - name = namer.next() - filename = _os.path.join(dir, name) - try: - fd = _os.open(filename, flags, 0600) - fp = _os.fdopen(fd, 'w') - fp.write('blat') - fp.close() - _os.unlink(filename) - del fp, fd - return dir - except (OSError, IOError), e: - if e[0] != _errno.EEXIST: - break # no point trying more names in this directory - pass - raise IOError, (_errno.ENOENT, - ("No usable temporary directory found in %s" % dirlist)) - -_name_sequence = None - -def _get_candidate_names(): - """Common setup sequence for all user-callable interfaces.""" - - global _name_sequence - if _name_sequence is None: - _once_lock.acquire() - try: - if _name_sequence is None: - _name_sequence = _RandomNameSequence() - finally: - _once_lock.release() - return _name_sequence - - -def _mkstemp_inner(dir, pre, suf, flags): - """Code common to mkstemp, TemporaryFile, and NamedTemporaryFile.""" - - names = _get_candidate_names() - - for seq in xrange(TMP_MAX): - name = names.next() - file = _os.path.join(dir, pre + name + suf) - try: - fd = _os.open(file, flags, 0600) - _set_cloexec(fd) - return (fd, _os.path.abspath(file)) - except OSError, e: - if e.errno == _errno.EEXIST: - continue # try again - raise - - raise IOError, (_errno.EEXIST, "No usable temporary file name found") - - -# User visible interfaces. - -def gettempprefix(): - """Accessor for tempdir.template.""" - return template - -tempdir = None - -def gettempdir(): - """Accessor for tempfile.tempdir.""" - global tempdir - if tempdir is None: - _once_lock.acquire() - try: - if tempdir is None: - tempdir = _get_default_tempdir() - finally: - _once_lock.release() - return tempdir - -def mkstemp(suffix="", prefix=template, dir=None, text=False): - """User-callable function to create and return a unique temporary - file. The return value is a pair (fd, name) where fd is the - file descriptor returned by os.open, and name is the filename. - - If 'suffix' is specified, the file name will end with that suffix, - otherwise there will be no suffix. - - If 'prefix' is specified, the file name will begin with that prefix, - otherwise a default prefix is used. - - If 'dir' is specified, the file will be created in that directory, - otherwise a default directory is used. - - If 'text' is specified and true, the file is opened in text - mode. Else (the default) the file is opened in binary mode. On - some operating systems, this makes no difference. 
- - The file is readable and writable only by the creating user ID. - If the operating system uses permission bits to indicate whether a - file is executable, the file is executable by no one. The file - descriptor is not inherited by children of this process. - - Caller is responsible for deleting the file when done with it. - """ - - if dir is None: - dir = gettempdir() - - if text: - flags = _text_openflags - else: - flags = _bin_openflags - - return _mkstemp_inner(dir, prefix, suffix, flags) - - -def mkdtemp(suffix="", prefix=template, dir=None): - """User-callable function to create and return a unique temporary - directory. The return value is the pathname of the directory. - - Arguments are as for mkstemp, except that the 'text' argument is - not accepted. - - The directory is readable, writable, and searchable only by the - creating user. - - Caller is responsible for deleting the directory when done with it. - """ - - if dir is None: - dir = gettempdir() - - names = _get_candidate_names() - - for seq in xrange(TMP_MAX): - name = names.next() - file = _os.path.join(dir, prefix + name + suffix) - try: - _os.mkdir(file, 0700) - return file - except OSError, e: - if e.errno == _errno.EEXIST: - continue # try again - raise - - raise IOError, (_errno.EEXIST, "No usable temporary directory name found") - -def mktemp(suffix="", prefix=template, dir=None): - """User-callable function to return a unique temporary file name. The - file is not created. - - Arguments are as for mkstemp, except that the 'text' argument is - not accepted. - - This function is unsafe and should not be used. The file name - refers to a file that did not exist at some point, but by the time - you get around to creating it, someone else may have beaten you to - the punch. - """ - -## from warnings import warn as _warn -## _warn("mktemp is a potential security risk to your program", -## RuntimeWarning, stacklevel=2) - - if dir is None: - dir = gettempdir() - - names = _get_candidate_names() - for seq in xrange(TMP_MAX): - name = names.next() - file = _os.path.join(dir, prefix + name + suffix) - if not _exists(file): - return file - - raise IOError, (_errno.EEXIST, "No usable temporary filename found") - - -class _TemporaryFileWrapper: - """Temporary file wrapper - - This class provides a wrapper around files opened for - temporary use. In particular, it seeks to automatically - remove the file when it is no longer needed. - """ - - def __init__(self, file, name, delete=True): - self.file = file - self.name = name - self.close_called = False - self.delete = delete - - def __getattr__(self, name): - # Attribute lookups are delegated to the underlying file - # and cached for non-numeric results - # (i.e. methods are cached, closed and friends are not) - file = self.__dict__['file'] - a = getattr(file, name) - if not issubclass(type(a), type(0)): - setattr(self, name, a) - return a - - # The underlying __enter__ method returns the wrong object - # (self.file) so override it to return the wrapper - def __enter__(self): - self.file.__enter__() - return self - - # NT provides delete-on-close as a primitive, so we don't need - # the wrapper to do anything special. We still use it so that - # file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile. - if _os.name != 'nt': - # Cache the unlinker so we don't get spurious errors at - # shutdown when the module-level "os" is None'd out. 
Note - # that this must be referenced as self.unlink, because the - # name TemporaryFileWrapper may also get None'd out before - # __del__ is called. - unlink = _os.unlink - - def close(self): - if not self.close_called: - self.close_called = True - self.file.close() - if self.delete: - self.unlink(self.name) - - def __del__(self): - self.close() - - # Need to trap __exit__ as well to ensure the file gets - # deleted when used in a with statement - def __exit__(self, exc, value, tb): - result = self.file.__exit__(exc, value, tb) - self.close() - return result - - -def NamedTemporaryFile(mode='w+b', bufsize=-1, suffix="", - prefix=template, dir=None, delete=True): - """Create and return a temporary file. - Arguments: - 'prefix', 'suffix', 'dir' -- as for mkstemp. - 'mode' -- the mode argument to os.fdopen (default "w+b"). - 'bufsize' -- the buffer size argument to os.fdopen (default -1). - 'delete' -- whether the file is deleted on close (default True). - The file is created as mkstemp() would do it. - - Returns an object with a file-like interface; the name of the file - is accessible as file.name. The file will be automatically deleted - when it is closed unless the 'delete' argument is set to False. - """ - - if dir is None: - dir = gettempdir() - - if 'b' in mode: - flags = _bin_openflags - else: - flags = _text_openflags - - # Setting O_TEMPORARY in the flags causes the OS to delete - # the file when it is closed. This is only supported by Windows. - if _os.name == 'nt' and delete: - flags |= _os.O_TEMPORARY - - (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags) - file = _os.fdopen(fd, mode, bufsize) - return _TemporaryFileWrapper(file, name, delete) - -if _os.name != 'posix' or _os.sys.platform == 'cygwin': - # On non-POSIX and Cygwin systems, assume that we cannot unlink a file - # while it is open. - TemporaryFile = NamedTemporaryFile - -else: - def TemporaryFile(mode='w+b', bufsize=-1, suffix="", - prefix=template, dir=None): - """Create and return a temporary file. - Arguments: - 'prefix', 'suffix', 'dir' -- as for mkstemp. - 'mode' -- the mode argument to os.fdopen (default "w+b"). - 'bufsize' -- the buffer size argument to os.fdopen (default -1). - The file is created as mkstemp() would do it. - - Returns an object with a file-like interface. The file has no - name, and will cease to exist when it is closed. - """ - - if dir is None: - dir = gettempdir() - - if 'b' in mode: - flags = _bin_openflags - else: - flags = _text_openflags - - (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags) - try: - _os.unlink(name) - return _os.fdopen(fd, mode, bufsize) - except: - _os.close(fd) - raise - -class SpooledTemporaryFile: - """Temporary file wrapper, specialized to switch from - StringIO to a real file when it exceeds a certain size or - when a fileno is needed. 
- """ - _rolled = False - - def __init__(self, max_size=0, mode='w+b', bufsize=-1, - suffix="", prefix=template, dir=None): - self._file = _StringIO() - self._max_size = max_size - self._rolled = False - self._TemporaryFileArgs = (mode, bufsize, suffix, prefix, dir) - - def _check(self, file): - if self._rolled: return - max_size = self._max_size - if max_size and file.tell() > max_size: - self.rollover() - - def rollover(self): - if self._rolled: return - file = self._file - newfile = self._file = TemporaryFile(*self._TemporaryFileArgs) - del self._TemporaryFileArgs - - newfile.write(file.getvalue()) - newfile.seek(file.tell(), 0) - - self._rolled = True - - # The method caching trick from NamedTemporaryFile - # won't work here, because _file may change from a - # _StringIO instance to a real file. So we list - # all the methods directly. - - # Context management protocol - def __enter__(self): - if self._file.closed: - raise ValueError("Cannot enter context with closed file") - return self - - def __exit__(self, exc, value, tb): - self._file.close() - - # file protocol - def __iter__(self): - return self._file.__iter__() - - def close(self): - self._file.close() - - @property - def closed(self): - return self._file.closed - - @property - def encoding(self): - return self._file.encoding - - def fileno(self): - self.rollover() - return self._file.fileno() - - def flush(self): - self._file.flush() - - def isatty(self): - return self._file.isatty() - - @property - def mode(self): - return self._file.mode - - @property - def name(self): - return self._file.name - - @property - def newlines(self): - return self._file.newlines - - def next(self): - return self._file.next - - def read(self, *args): - return self._file.read(*args) - - def readline(self, *args): - return self._file.readline(*args) - - def readlines(self, *args): - return self._file.readlines(*args) - - def seek(self, *args): - self._file.seek(*args) - - @property - def softspace(self): - return self._file.softspace - - def tell(self): - return self._file.tell() - - def truncate(self): - self._file.truncate() - - def write(self, s): - file = self._file - rv = file.write(s) - self._check(file) - return rv - - def writelines(self, iterable): - file = self._file - rv = file.writelines(iterable) - self._check(file) - return rv - - def xreadlines(self, *args): - return self._file.xreadlines(*args) diff --git a/icarus-miner/data/usr/lib/python2.6/lib-dynload/tempfile.pyc b/icarus-miner/data/usr/lib/python2.6/lib-dynload/tempfile.pyc deleted file mode 100644 index 4b907be..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/lib-dynload/tempfile.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/lib-dynload/termios.so b/icarus-miner/data/usr/lib/python2.6/lib-dynload/termios.so deleted file mode 100755 index 9686b23..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/lib-dynload/termios.so and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/lib-dynload/time.so b/icarus-miner/data/usr/lib/python2.6/lib-dynload/time.so deleted file mode 100755 index 349d145..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/lib-dynload/time.so and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/linecache.py b/icarus-miner/data/usr/lib/python2.6/linecache.py deleted file mode 100644 index e7c33e1..0000000 --- a/icarus-miner/data/usr/lib/python2.6/linecache.py +++ /dev/null @@ -1,138 +0,0 @@ -"""Cache lines from files. 
- -This is intended to read lines from modules imported -- hence if a filename -is not found, it will look down the module search path for a file by -that name. -""" - -import sys -import os - -__all__ = ["getline", "clearcache", "checkcache"] - -def getline(filename, lineno, module_globals=None): - lines = getlines(filename, module_globals) - if 1 <= lineno <= len(lines): - return lines[lineno-1] - else: - return '' - - -# The cache - -cache = {} # The cache - - -def clearcache(): - """Clear the cache entirely.""" - - global cache - cache = {} - - -def getlines(filename, module_globals=None): - """Get the lines for a file from the cache. - Update the cache if it doesn't contain an entry for this file already.""" - - if filename in cache: - return cache[filename][2] - else: - return updatecache(filename, module_globals) - - -def checkcache(filename=None): - """Discard cache entries that are out of date. - (This is not checked upon each call!)""" - - if filename is None: - filenames = cache.keys() - else: - if filename in cache: - filenames = [filename] - else: - return - - for filename in filenames: - size, mtime, lines, fullname = cache[filename] - if mtime is None: - continue # no-op for files loaded via a __loader__ - try: - stat = os.stat(fullname) - except os.error: - del cache[filename] - continue - if size != stat.st_size or mtime != stat.st_mtime: - del cache[filename] - - -def updatecache(filename, module_globals=None): - """Update a cache entry and return its list of lines. - If something's wrong, print a message, discard the cache entry, - and return an empty list.""" - - if filename in cache: - del cache[filename] - if not filename or filename[0] + filename[-1] == '<>': - return [] - - fullname = filename - try: - stat = os.stat(fullname) - except os.error, msg: - basename = filename - - # Try for a __loader__, if available - if module_globals and '__loader__' in module_globals: - name = module_globals.get('__name__') - loader = module_globals['__loader__'] - get_source = getattr(loader, 'get_source', None) - - if name and get_source: - try: - data = get_source(name) - except (ImportError, IOError): - pass - else: - if data is None: - # No luck, the PEP302 loader cannot find the source - # for this module. - return [] - cache[filename] = ( - len(data), None, - [line+'\n' for line in data.splitlines()], fullname - ) - return cache[filename][2] - - # Try looking through the module search path, which is only useful - # when handling a relative filename. - if os.path.isabs(filename): - return [] - - for dirname in sys.path: - # When using imputil, sys.path may contain things other than - # strings; ignore them when it happens. - try: - fullname = os.path.join(dirname, basename) - except (TypeError, AttributeError): - # Not sufficiently string-like to do anything useful with. 
- pass - else: - try: - stat = os.stat(fullname) - break - except os.error: - pass - else: - # No luck -## print '*** Cannot stat', filename, ':', msg - return [] - try: - fp = open(fullname, 'rU') - lines = fp.readlines() - fp.close() - except IOError, msg: -## print '*** Cannot open', fullname, ':', msg - return [] - size, mtime = stat.st_size, stat.st_mtime - cache[filename] = size, mtime, lines, fullname - return lines diff --git a/icarus-miner/data/usr/lib/python2.6/linecache.pyc b/icarus-miner/data/usr/lib/python2.6/linecache.pyc deleted file mode 100644 index f678a7a..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/linecache.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/mimetools.py b/icarus-miner/data/usr/lib/python2.6/mimetools.py deleted file mode 100644 index 71ca8f8..0000000 --- a/icarus-miner/data/usr/lib/python2.6/mimetools.py +++ /dev/null @@ -1,250 +0,0 @@ -"""Various tools used by MIME-reading or MIME-writing programs.""" - - -import os -import sys -import tempfile -from warnings import filterwarnings, catch_warnings -with catch_warnings(): - if sys.py3kwarning: - filterwarnings("ignore", ".*rfc822 has been removed", DeprecationWarning) - import rfc822 - -from warnings import warnpy3k -warnpy3k("in 3.x, mimetools has been removed in favor of the email package", - stacklevel=2) - -__all__ = ["Message","choose_boundary","encode","decode","copyliteral", - "copybinary"] - -class Message(rfc822.Message): - """A derived class of rfc822.Message that knows about MIME headers and - contains some hooks for decoding encoded and multipart messages.""" - - def __init__(self, fp, seekable = 1): - rfc822.Message.__init__(self, fp, seekable) - self.encodingheader = \ - self.getheader('content-transfer-encoding') - self.typeheader = \ - self.getheader('content-type') - self.parsetype() - self.parseplist() - - def parsetype(self): - str = self.typeheader - if str is None: - str = 'text/plain' - if ';' in str: - i = str.index(';') - self.plisttext = str[i:] - str = str[:i] - else: - self.plisttext = '' - fields = str.split('/') - for i in range(len(fields)): - fields[i] = fields[i].strip().lower() - self.type = '/'.join(fields) - self.maintype = fields[0] - self.subtype = '/'.join(fields[1:]) - - def parseplist(self): - str = self.plisttext - self.plist = [] - while str[:1] == ';': - str = str[1:] - if ';' in str: - # XXX Should parse quotes! 
- end = str.index(';') - else: - end = len(str) - f = str[:end] - if '=' in f: - i = f.index('=') - f = f[:i].strip().lower() + \ - '=' + f[i+1:].strip() - self.plist.append(f.strip()) - str = str[end:] - - def getplist(self): - return self.plist - - def getparam(self, name): - name = name.lower() + '=' - n = len(name) - for p in self.plist: - if p[:n] == name: - return rfc822.unquote(p[n:]) - return None - - def getparamnames(self): - result = [] - for p in self.plist: - i = p.find('=') - if i >= 0: - result.append(p[:i].lower()) - return result - - def getencoding(self): - if self.encodingheader is None: - return '7bit' - return self.encodingheader.lower() - - def gettype(self): - return self.type - - def getmaintype(self): - return self.maintype - - def getsubtype(self): - return self.subtype - - - - -# Utility functions -# ----------------- - -try: - import thread -except ImportError: - import dummy_thread as thread -_counter_lock = thread.allocate_lock() -del thread - -_counter = 0 -def _get_next_counter(): - global _counter - _counter_lock.acquire() - _counter += 1 - result = _counter - _counter_lock.release() - return result - -_prefix = None - -def choose_boundary(): - """Return a string usable as a multipart boundary. - - The string chosen is unique within a single program run, and - incorporates the user id (if available), process id (if available), - and current time. So it's very unlikely the returned string appears - in message text, but there's no guarantee. - - The boundary contains dots so you have to quote it in the header.""" - - global _prefix - import time - if _prefix is None: - import socket - try: - hostid = socket.gethostbyname(socket.gethostname()) - except socket.gaierror: - hostid = '127.0.0.1' - try: - uid = repr(os.getuid()) - except AttributeError: - uid = '1' - try: - pid = repr(os.getpid()) - except AttributeError: - pid = '1' - _prefix = hostid + '.' + uid + '.' 
+ pid - return "%s.%.3f.%d" % (_prefix, time.time(), _get_next_counter()) - - -# Subroutines for decoding some common content-transfer-types - -def decode(input, output, encoding): - """Decode common content-transfer-encodings (base64, quopri, uuencode).""" - if encoding == 'base64': - import base64 - return base64.decode(input, output) - if encoding == 'quoted-printable': - import quopri - return quopri.decode(input, output) - if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'): - import uu - return uu.decode(input, output) - if encoding in ('7bit', '8bit'): - return output.write(input.read()) - if encoding in decodetab: - pipethrough(input, decodetab[encoding], output) - else: - raise ValueError, \ - 'unknown Content-Transfer-Encoding: %s' % encoding - -def encode(input, output, encoding): - """Encode common content-transfer-encodings (base64, quopri, uuencode).""" - if encoding == 'base64': - import base64 - return base64.encode(input, output) - if encoding == 'quoted-printable': - import quopri - return quopri.encode(input, output, 0) - if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'): - import uu - return uu.encode(input, output) - if encoding in ('7bit', '8bit'): - return output.write(input.read()) - if encoding in encodetab: - pipethrough(input, encodetab[encoding], output) - else: - raise ValueError, \ - 'unknown Content-Transfer-Encoding: %s' % encoding - -# The following is no longer used for standard encodings - -# XXX This requires that uudecode and mmencode are in $PATH - -uudecode_pipe = '''( -TEMP=/tmp/@uu.$$ -sed "s%^begin [0-7][0-7]* .*%begin 600 $TEMP%" | uudecode -cat $TEMP -rm $TEMP -)''' - -decodetab = { - 'uuencode': uudecode_pipe, - 'x-uuencode': uudecode_pipe, - 'uue': uudecode_pipe, - 'x-uue': uudecode_pipe, - 'quoted-printable': 'mmencode -u -q', - 'base64': 'mmencode -u -b', -} - -encodetab = { - 'x-uuencode': 'uuencode tempfile', - 'uuencode': 'uuencode tempfile', - 'x-uue': 'uuencode tempfile', - 'uue': 'uuencode tempfile', - 'quoted-printable': 'mmencode -q', - 'base64': 'mmencode -b', -} - -def pipeto(input, command): - pipe = os.popen(command, 'w') - copyliteral(input, pipe) - pipe.close() - -def pipethrough(input, command, output): - (fd, tempname) = tempfile.mkstemp() - temp = os.fdopen(fd, 'w') - copyliteral(input, temp) - temp.close() - pipe = os.popen(command + ' <' + tempname, 'r') - copybinary(pipe, output) - pipe.close() - os.unlink(tempname) - -def copyliteral(input, output): - while 1: - line = input.readline() - if not line: break - output.write(line) - -def copybinary(input, output): - BUFSIZE = 8192 - while 1: - line = input.read(BUFSIZE) - if not line: break - output.write(line) diff --git a/icarus-miner/data/usr/lib/python2.6/mimetools.pyc b/icarus-miner/data/usr/lib/python2.6/mimetools.pyc deleted file mode 100644 index 18b0b56..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/mimetools.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/optparse.py b/icarus-miner/data/usr/lib/python2.6/optparse.py deleted file mode 100644 index d0d5961..0000000 --- a/icarus-miner/data/usr/lib/python2.6/optparse.py +++ /dev/null @@ -1,1698 +0,0 @@ -"""A powerful, extensible, and easy-to-use option parser. - -By Greg Ward - -Originally distributed as Optik. - -For support, use the optik-users@lists.sourceforge.net mailing list -(http://lists.sourceforge.net/lists/listinfo/optik-users). 
- -Simple usage example: - - from optparse import OptionParser - - parser = OptionParser() - parser.add_option("-f", "--file", dest="filename", - help="write report to FILE", metavar="FILE") - parser.add_option("-q", "--quiet", - action="store_false", dest="verbose", default=True, - help="don't print status messages to stdout") - - (options, args) = parser.parse_args() -""" - -__version__ = "1.5.3" - -__all__ = ['Option', - 'make_option', - 'SUPPRESS_HELP', - 'SUPPRESS_USAGE', - 'Values', - 'OptionContainer', - 'OptionGroup', - 'OptionParser', - 'HelpFormatter', - 'IndentedHelpFormatter', - 'TitledHelpFormatter', - 'OptParseError', - 'OptionError', - 'OptionConflictError', - 'OptionValueError', - 'BadOptionError'] - -__copyright__ = """ -Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved. -Copyright (c) 2002-2006 Python Software Foundation. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - * Neither the name of the author nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR -CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -""" - -import sys, os -import types -import textwrap - -def _repr(self): - return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self) - - -# This file was generated from: -# Id: option_parser.py 527 2006-07-23 15:21:30Z greg -# Id: option.py 522 2006-06-11 16:22:03Z gward -# Id: help.py 527 2006-07-23 15:21:30Z greg -# Id: errors.py 509 2006-04-20 00:58:24Z gward - -try: - from gettext import gettext -except ImportError: - def gettext(message): - return message -_ = gettext - - -class OptParseError (Exception): - def __init__(self, msg): - self.msg = msg - - def __str__(self): - return self.msg - - -class OptionError (OptParseError): - """ - Raised if an Option instance is created with invalid or - inconsistent arguments. - """ - - def __init__(self, msg, option): - self.msg = msg - self.option_id = str(option) - - def __str__(self): - if self.option_id: - return "option %s: %s" % (self.option_id, self.msg) - else: - return self.msg - -class OptionConflictError (OptionError): - """ - Raised if conflicting options are added to an OptionParser. - """ - -class OptionValueError (OptParseError): - """ - Raised if an invalid option value is encountered on the command - line. 
- """ - -class BadOptionError (OptParseError): - """ - Raised if an invalid option is seen on the command line. - """ - def __init__(self, opt_str): - self.opt_str = opt_str - - def __str__(self): - return _("no such option: %s") % self.opt_str - -class AmbiguousOptionError (BadOptionError): - """ - Raised if an ambiguous option is seen on the command line. - """ - def __init__(self, opt_str, possibilities): - BadOptionError.__init__(self, opt_str) - self.possibilities = possibilities - - def __str__(self): - return (_("ambiguous option: %s (%s?)") - % (self.opt_str, ", ".join(self.possibilities))) - - -class HelpFormatter: - - """ - Abstract base class for formatting option help. OptionParser - instances should use one of the HelpFormatter subclasses for - formatting help; by default IndentedHelpFormatter is used. - - Instance attributes: - parser : OptionParser - the controlling OptionParser instance - indent_increment : int - the number of columns to indent per nesting level - max_help_position : int - the maximum starting column for option help text - help_position : int - the calculated starting column for option help text; - initially the same as the maximum - width : int - total number of columns for output (pass None to constructor for - this value to be taken from the $COLUMNS environment variable) - level : int - current indentation level - current_indent : int - current indentation level (in columns) - help_width : int - number of columns available for option help text (calculated) - default_tag : str - text to replace with each option's default value, "%default" - by default. Set to false value to disable default value expansion. - option_strings : { Option : str } - maps Option instances to the snippet of help text explaining - the syntax of that option, e.g. "-h, --help" or - "-fFILE, --file=FILE" - _short_opt_fmt : str - format string controlling how short options with values are - printed in help text. Must be either "%s%s" ("-fFILE") or - "%s %s" ("-f FILE"), because those are the two syntaxes that - Optik supports. - _long_opt_fmt : str - similar but for long options; must be either "%s %s" ("--file FILE") - or "%s=%s" ("--file=FILE"). - """ - - NO_DEFAULT_VALUE = "none" - - def __init__(self, - indent_increment, - max_help_position, - width, - short_first): - self.parser = None - self.indent_increment = indent_increment - self.help_position = self.max_help_position = max_help_position - if width is None: - try: - width = int(os.environ['COLUMNS']) - except (KeyError, ValueError): - width = 80 - width -= 2 - self.width = width - self.current_indent = 0 - self.level = 0 - self.help_width = None # computed later - self.short_first = short_first - self.default_tag = "%default" - self.option_strings = {} - self._short_opt_fmt = "%s %s" - self._long_opt_fmt = "%s=%s" - - def set_parser(self, parser): - self.parser = parser - - def set_short_opt_delimiter(self, delim): - if delim not in ("", " "): - raise ValueError( - "invalid metavar delimiter for short options: %r" % delim) - self._short_opt_fmt = "%s" + delim + "%s" - - def set_long_opt_delimiter(self, delim): - if delim not in ("=", " "): - raise ValueError( - "invalid metavar delimiter for long options: %r" % delim) - self._long_opt_fmt = "%s" + delim + "%s" - - def indent(self): - self.current_indent += self.indent_increment - self.level += 1 - - def dedent(self): - self.current_indent -= self.indent_increment - assert self.current_indent >= 0, "Indent decreased below 0." 
- self.level -= 1 - - def format_usage(self, usage): - raise NotImplementedError, "subclasses must implement" - - def format_heading(self, heading): - raise NotImplementedError, "subclasses must implement" - - def _format_text(self, text): - """ - Format a paragraph of free-form text for inclusion in the - help output at the current indentation level. - """ - text_width = self.width - self.current_indent - indent = " "*self.current_indent - return textwrap.fill(text, - text_width, - initial_indent=indent, - subsequent_indent=indent) - - def format_description(self, description): - if description: - return self._format_text(description) + "\n" - else: - return "" - - def format_epilog(self, epilog): - if epilog: - return "\n" + self._format_text(epilog) + "\n" - else: - return "" - - - def expand_default(self, option): - if self.parser is None or not self.default_tag: - return option.help - - default_value = self.parser.defaults.get(option.dest) - if default_value is NO_DEFAULT or default_value is None: - default_value = self.NO_DEFAULT_VALUE - - return option.help.replace(self.default_tag, str(default_value)) - - def format_option(self, option): - # The help for each option consists of two parts: - # * the opt strings and metavars - # eg. ("-x", or "-fFILENAME, --file=FILENAME") - # * the user-supplied help string - # eg. ("turn on expert mode", "read data from FILENAME") - # - # If possible, we write both of these on the same line: - # -x turn on expert mode - # - # But if the opt string list is too long, we put the help - # string on a second line, indented to the same column it would - # start in if it fit on the first line. - # -fFILENAME, --file=FILENAME - # read data from FILENAME - result = [] - opts = self.option_strings[option] - opt_width = self.help_position - self.current_indent - 2 - if len(opts) > opt_width: - opts = "%*s%s\n" % (self.current_indent, "", opts) - indent_first = self.help_position - else: # start help on same line as opts - opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts) - indent_first = 0 - result.append(opts) - if option.help: - help_text = self.expand_default(option) - help_lines = textwrap.wrap(help_text, self.help_width) - result.append("%*s%s\n" % (indent_first, "", help_lines[0])) - result.extend(["%*s%s\n" % (self.help_position, "", line) - for line in help_lines[1:]]) - elif opts[-1] != "\n": - result.append("\n") - return "".join(result) - - def store_option_strings(self, parser): - self.indent() - max_len = 0 - for opt in parser.option_list: - strings = self.format_option_strings(opt) - self.option_strings[opt] = strings - max_len = max(max_len, len(strings) + self.current_indent) - self.indent() - for group in parser.option_groups: - for opt in group.option_list: - strings = self.format_option_strings(opt) - self.option_strings[opt] = strings - max_len = max(max_len, len(strings) + self.current_indent) - self.dedent() - self.dedent() - self.help_position = min(max_len + 2, self.max_help_position) - self.help_width = self.width - self.help_position - - def format_option_strings(self, option): - """Return a comma-separated list of option strings & metavariables.""" - if option.takes_value(): - metavar = option.metavar or option.dest.upper() - short_opts = [self._short_opt_fmt % (sopt, metavar) - for sopt in option._short_opts] - long_opts = [self._long_opt_fmt % (lopt, metavar) - for lopt in option._long_opts] - else: - short_opts = option._short_opts - long_opts = option._long_opts - - if self.short_first: - opts = short_opts + 
long_opts - else: - opts = long_opts + short_opts - - return ", ".join(opts) - -class IndentedHelpFormatter (HelpFormatter): - """Format help with indented section bodies. - """ - - def __init__(self, - indent_increment=2, - max_help_position=24, - width=None, - short_first=1): - HelpFormatter.__init__( - self, indent_increment, max_help_position, width, short_first) - - def format_usage(self, usage): - return _("Usage: %s\n") % usage - - def format_heading(self, heading): - return "%*s%s:\n" % (self.current_indent, "", heading) - - -class TitledHelpFormatter (HelpFormatter): - """Format help with underlined section headers. - """ - - def __init__(self, - indent_increment=0, - max_help_position=24, - width=None, - short_first=0): - HelpFormatter.__init__ ( - self, indent_increment, max_help_position, width, short_first) - - def format_usage(self, usage): - return "%s %s\n" % (self.format_heading(_("Usage")), usage) - - def format_heading(self, heading): - return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading)) - - -def _parse_num(val, type): - if val[:2].lower() == "0x": # hexadecimal - radix = 16 - elif val[:2].lower() == "0b": # binary - radix = 2 - val = val[2:] or "0" # have to remove "0b" prefix - elif val[:1] == "0": # octal - radix = 8 - else: # decimal - radix = 10 - - return type(val, radix) - -def _parse_int(val): - return _parse_num(val, int) - -def _parse_long(val): - return _parse_num(val, long) - -_builtin_cvt = { "int" : (_parse_int, _("integer")), - "long" : (_parse_long, _("long integer")), - "float" : (float, _("floating-point")), - "complex" : (complex, _("complex")) } - -def check_builtin(option, opt, value): - (cvt, what) = _builtin_cvt[option.type] - try: - return cvt(value) - except ValueError: - raise OptionValueError( - _("option %s: invalid %s value: %r") % (opt, what, value)) - -def check_choice(option, opt, value): - if value in option.choices: - return value - else: - choices = ", ".join(map(repr, option.choices)) - raise OptionValueError( - _("option %s: invalid choice: %r (choose from %s)") - % (opt, value, choices)) - -# Not supplying a default is different from a default of None, -# so we need an explicit "not supplied" value. -NO_DEFAULT = ("NO", "DEFAULT") - - -class Option: - """ - Instance attributes: - _short_opts : [string] - _long_opts : [string] - - action : string - type : string - dest : string - default : any - nargs : int - const : any - choices : [string] - callback : function - callback_args : (any*) - callback_kwargs : { string : any } - help : string - metavar : string - """ - - # The list of instance attributes that may be set through - # keyword args to the constructor. - ATTRS = ['action', - 'type', - 'dest', - 'default', - 'nargs', - 'const', - 'choices', - 'callback', - 'callback_args', - 'callback_kwargs', - 'help', - 'metavar'] - - # The set of actions allowed by option parsers. Explicitly listed - # here so the constructor can validate its arguments. - ACTIONS = ("store", - "store_const", - "store_true", - "store_false", - "append", - "append_const", - "count", - "callback", - "help", - "version") - - # The set of actions that involve storing a value somewhere; - # also listed just for constructor argument validation. (If - # the action is one of these, there must be a destination.) - STORE_ACTIONS = ("store", - "store_const", - "store_true", - "store_false", - "append", - "append_const", - "count") - - # The set of actions for which it makes sense to supply a value - # type, ie. 
which may consume an argument from the command line. - TYPED_ACTIONS = ("store", - "append", - "callback") - - # The set of actions which *require* a value type, ie. that - # always consume an argument from the command line. - ALWAYS_TYPED_ACTIONS = ("store", - "append") - - # The set of actions which take a 'const' attribute. - CONST_ACTIONS = ("store_const", - "append_const") - - # The set of known types for option parsers. Again, listed here for - # constructor argument validation. - TYPES = ("string", "int", "long", "float", "complex", "choice") - - # Dictionary of argument checking functions, which convert and - # validate option arguments according to the option type. - # - # Signature of checking functions is: - # check(option : Option, opt : string, value : string) -> any - # where - # option is the Option instance calling the checker - # opt is the actual option seen on the command-line - # (eg. "-a", "--file") - # value is the option argument seen on the command-line - # - # The return value should be in the appropriate Python type - # for option.type -- eg. an integer if option.type == "int". - # - # If no checker is defined for a type, arguments will be - # unchecked and remain strings. - TYPE_CHECKER = { "int" : check_builtin, - "long" : check_builtin, - "float" : check_builtin, - "complex": check_builtin, - "choice" : check_choice, - } - - - # CHECK_METHODS is a list of unbound method objects; they are called - # by the constructor, in order, after all attributes are - # initialized. The list is created and filled in later, after all - # the methods are actually defined. (I just put it here because I - # like to define and document all class attributes in the same - # place.) Subclasses that add another _check_*() method should - # define their own CHECK_METHODS list that adds their check method - # to those from this class. - CHECK_METHODS = None - - - # -- Constructor/initialization methods ---------------------------- - - def __init__(self, *opts, **attrs): - # Set _short_opts, _long_opts attrs from 'opts' tuple. - # Have to be set now, in case no option strings are supplied. - self._short_opts = [] - self._long_opts = [] - opts = self._check_opt_strings(opts) - self._set_opt_strings(opts) - - # Set all other attrs (action, type, etc.) from 'attrs' dict - self._set_attrs(attrs) - - # Check all the attributes we just set. There are lots of - # complicated interdependencies, but luckily they can be farmed - # out to the _check_*() methods listed in CHECK_METHODS -- which - # could be handy for subclasses! The one thing these all share - # is that they raise OptionError if they discover a problem. - for checker in self.CHECK_METHODS: - checker(self) - - def _check_opt_strings(self, opts): - # Filter out None because early versions of Optik had exactly - # one short option and one long option, either of which - # could be None. 
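The TYPES/TYPE_CHECKER tables above are the documented extension point for custom option types: subclass Option, extend both tables, and hand the subclass to OptionParser. A sketch with a made-up 'mac' type (the type name and validation rule are illustrative only):

    from copy import copy
    from optparse import Option, OptionParser, OptionValueError

    def check_mac(option, opt, value):
        # Checker signature as documented above:
        # check(option, opt, value) -> value converted to option.type.
        if len(value.split(':')) != 6:
            raise OptionValueError(
                'option %s: invalid MAC address: %r' % (opt, value))
        return value

    class MacOption(Option):
        TYPES = Option.TYPES + ('mac',)
        TYPE_CHECKER = copy(Option.TYPE_CHECKER)
        TYPE_CHECKER['mac'] = check_mac

    parser = OptionParser(option_class=MacOption)
    parser.add_option('--hwaddr', type='mac', dest='hwaddr')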
- opts = filter(None, opts) - if not opts: - raise TypeError("at least one option string must be supplied") - return opts - - def _set_opt_strings(self, opts): - for opt in opts: - if len(opt) < 2: - raise OptionError( - "invalid option string %r: " - "must be at least two characters long" % opt, self) - elif len(opt) == 2: - if not (opt[0] == "-" and opt[1] != "-"): - raise OptionError( - "invalid short option string %r: " - "must be of the form -x, (x any non-dash char)" % opt, - self) - self._short_opts.append(opt) - else: - if not (opt[0:2] == "--" and opt[2] != "-"): - raise OptionError( - "invalid long option string %r: " - "must start with --, followed by non-dash" % opt, - self) - self._long_opts.append(opt) - - def _set_attrs(self, attrs): - for attr in self.ATTRS: - if attr in attrs: - setattr(self, attr, attrs[attr]) - del attrs[attr] - else: - if attr == 'default': - setattr(self, attr, NO_DEFAULT) - else: - setattr(self, attr, None) - if attrs: - attrs = attrs.keys() - attrs.sort() - raise OptionError( - "invalid keyword arguments: %s" % ", ".join(attrs), - self) - - - # -- Constructor validation methods -------------------------------- - - def _check_action(self): - if self.action is None: - self.action = "store" - elif self.action not in self.ACTIONS: - raise OptionError("invalid action: %r" % self.action, self) - - def _check_type(self): - if self.type is None: - if self.action in self.ALWAYS_TYPED_ACTIONS: - if self.choices is not None: - # The "choices" attribute implies "choice" type. - self.type = "choice" - else: - # No type given? "string" is the most sensible default. - self.type = "string" - else: - # Allow type objects or builtin type conversion functions - # (int, str, etc.) as an alternative to their names. (The - # complicated check of __builtin__ is only necessary for - # Python 2.1 and earlier, and is short-circuited by the - # first check on modern Pythons.) - import __builtin__ - if ( type(self.type) is types.TypeType or - (hasattr(self.type, "__name__") and - getattr(__builtin__, self.type.__name__, None) is self.type) ): - self.type = self.type.__name__ - - if self.type == "str": - self.type = "string" - - if self.type not in self.TYPES: - raise OptionError("invalid option type: %r" % self.type, self) - if self.action not in self.TYPED_ACTIONS: - raise OptionError( - "must not supply a type for action %r" % self.action, self) - - def _check_choice(self): - if self.type == "choice": - if self.choices is None: - raise OptionError( - "must supply a list of choices for type 'choice'", self) - elif type(self.choices) not in (types.TupleType, types.ListType): - raise OptionError( - "choices must be a list of strings ('%s' supplied)" - % str(type(self.choices)).split("'")[1], self) - elif self.choices is not None: - raise OptionError( - "must not supply choices for type %r" % self.type, self) - - def _check_dest(self): - # No destination given, and we need one for this action. The - # self.type check is for callbacks that take a value. - takes_value = (self.action in self.STORE_ACTIONS or - self.type is not None) - if self.dest is None and takes_value: - - # Glean a destination from the first long option string, - # or from the first short option string if no long options. - if self._long_opts: - # eg. 
"--foo-bar" -> "foo_bar" - self.dest = self._long_opts[0][2:].replace('-', '_') - else: - self.dest = self._short_opts[0][1] - - def _check_const(self): - if self.action not in self.CONST_ACTIONS and self.const is not None: - raise OptionError( - "'const' must not be supplied for action %r" % self.action, - self) - - def _check_nargs(self): - if self.action in self.TYPED_ACTIONS: - if self.nargs is None: - self.nargs = 1 - elif self.nargs is not None: - raise OptionError( - "'nargs' must not be supplied for action %r" % self.action, - self) - - def _check_callback(self): - if self.action == "callback": - if not hasattr(self.callback, '__call__'): - raise OptionError( - "callback not callable: %r" % self.callback, self) - if (self.callback_args is not None and - type(self.callback_args) is not types.TupleType): - raise OptionError( - "callback_args, if supplied, must be a tuple: not %r" - % self.callback_args, self) - if (self.callback_kwargs is not None and - type(self.callback_kwargs) is not types.DictType): - raise OptionError( - "callback_kwargs, if supplied, must be a dict: not %r" - % self.callback_kwargs, self) - else: - if self.callback is not None: - raise OptionError( - "callback supplied (%r) for non-callback option" - % self.callback, self) - if self.callback_args is not None: - raise OptionError( - "callback_args supplied for non-callback option", self) - if self.callback_kwargs is not None: - raise OptionError( - "callback_kwargs supplied for non-callback option", self) - - - CHECK_METHODS = [_check_action, - _check_type, - _check_choice, - _check_dest, - _check_const, - _check_nargs, - _check_callback] - - - # -- Miscellaneous methods ----------------------------------------- - - def __str__(self): - return "/".join(self._short_opts + self._long_opts) - - __repr__ = _repr - - def takes_value(self): - return self.type is not None - - def get_opt_string(self): - if self._long_opts: - return self._long_opts[0] - else: - return self._short_opts[0] - - - # -- Processing methods -------------------------------------------- - - def check_value(self, opt, value): - checker = self.TYPE_CHECKER.get(self.type) - if checker is None: - return value - else: - return checker(self, opt, value) - - def convert_value(self, opt, value): - if value is not None: - if self.nargs == 1: - return self.check_value(opt, value) - else: - return tuple([self.check_value(opt, v) for v in value]) - - def process(self, opt, value, values, parser): - - # First, convert the value(s) to the right type. Howl if any - # value(s) are bogus. - value = self.convert_value(opt, value) - - # And then take whatever action is expected of us. - # This is a separate method to make life easier for - # subclasses to add new actions. 
- return self.take_action( - self.action, self.dest, opt, value, values, parser) - - def take_action(self, action, dest, opt, value, values, parser): - if action == "store": - setattr(values, dest, value) - elif action == "store_const": - setattr(values, dest, self.const) - elif action == "store_true": - setattr(values, dest, True) - elif action == "store_false": - setattr(values, dest, False) - elif action == "append": - values.ensure_value(dest, []).append(value) - elif action == "append_const": - values.ensure_value(dest, []).append(self.const) - elif action == "count": - setattr(values, dest, values.ensure_value(dest, 0) + 1) - elif action == "callback": - args = self.callback_args or () - kwargs = self.callback_kwargs or {} - self.callback(self, opt, value, parser, *args, **kwargs) - elif action == "help": - parser.print_help() - parser.exit() - elif action == "version": - parser.print_version() - parser.exit() - else: - raise RuntimeError, "unknown action %r" % self.action - - return 1 - -# class Option - - -SUPPRESS_HELP = "SUPPRESS"+"HELP" -SUPPRESS_USAGE = "SUPPRESS"+"USAGE" - -try: - basestring -except NameError: - def isbasestring(x): - return isinstance(x, (types.StringType, types.UnicodeType)) -else: - def isbasestring(x): - return isinstance(x, basestring) - -class Values: - - def __init__(self, defaults=None): - if defaults: - for (attr, val) in defaults.items(): - setattr(self, attr, val) - - def __str__(self): - return str(self.__dict__) - - __repr__ = _repr - - def __cmp__(self, other): - if isinstance(other, Values): - return cmp(self.__dict__, other.__dict__) - elif isinstance(other, types.DictType): - return cmp(self.__dict__, other) - else: - return -1 - - def _update_careful(self, dict): - """ - Update the option values from an arbitrary dictionary, but only - use keys from dict that already have a corresponding attribute - in self. Any keys in dict without a corresponding attribute - are silently ignored. - """ - for attr in dir(self): - if attr in dict: - dval = dict[attr] - if dval is not None: - setattr(self, attr, dval) - - def _update_loose(self, dict): - """ - Update the option values from an arbitrary dictionary, - using all keys from the dictionary regardless of whether - they have a corresponding attribute in self or not. - """ - self.__dict__.update(dict) - - def _update(self, dict, mode): - if mode == "careful": - self._update_careful(dict) - elif mode == "loose": - self._update_loose(dict) - else: - raise ValueError, "invalid update mode: %r" % mode - - def read_module(self, modname, mode="careful"): - __import__(modname) - mod = sys.modules[modname] - self._update(vars(mod), mode) - - def read_file(self, filename, mode="careful"): - vars = {} - execfile(filename, vars) - self._update(vars, mode) - - def ensure_value(self, attr, value): - if not hasattr(self, attr) or getattr(self, attr) is None: - setattr(self, attr, value) - return getattr(self, attr) - - -class OptionContainer: - - """ - Abstract base class. - - Class attributes: - standard_option_list : [Option] - list of standard options that will be accepted by all instances - of this parser class (intended to be overridden by subclasses). - - Instance attributes: - option_list : [Option] - the list of Option objects contained by this OptionContainer - _short_opt : { string : Option } - dictionary mapping short option strings, eg. "-f" or "-X", - to the Option instances that implement them. If an Option - has multiple short option strings, it will appears in this - dictionary multiple times. 
[1] - _long_opt : { string : Option } - dictionary mapping long option strings, eg. "--file" or - "--exclude", to the Option instances that implement them. - Again, a given Option can occur multiple times in this - dictionary. [1] - defaults : { string : any } - dictionary mapping option destination names to default - values for each destination [1] - - [1] These mappings are common to (shared by) all components of the - controlling OptionParser, where they are initially created. - - """ - - def __init__(self, option_class, conflict_handler, description): - # Initialize the option list and related data structures. - # This method must be provided by subclasses, and it must - # initialize at least the following instance attributes: - # option_list, _short_opt, _long_opt, defaults. - self._create_option_list() - - self.option_class = option_class - self.set_conflict_handler(conflict_handler) - self.set_description(description) - - def _create_option_mappings(self): - # For use by OptionParser constructor -- create the master - # option mappings used by this OptionParser and all - # OptionGroups that it owns. - self._short_opt = {} # single letter -> Option instance - self._long_opt = {} # long option -> Option instance - self.defaults = {} # maps option dest -> default value - - - def _share_option_mappings(self, parser): - # For use by OptionGroup constructor -- use shared option - # mappings from the OptionParser that owns this OptionGroup. - self._short_opt = parser._short_opt - self._long_opt = parser._long_opt - self.defaults = parser.defaults - - def set_conflict_handler(self, handler): - if handler not in ("error", "resolve"): - raise ValueError, "invalid conflict_resolution value %r" % handler - self.conflict_handler = handler - - def set_description(self, description): - self.description = description - - def get_description(self): - return self.description - - - def destroy(self): - """see OptionParser.destroy().""" - del self._short_opt - del self._long_opt - del self.defaults - - - # -- Option-adding methods ----------------------------------------- - - def _check_conflict(self, option): - conflict_opts = [] - for opt in option._short_opts: - if opt in self._short_opt: - conflict_opts.append((opt, self._short_opt[opt])) - for opt in option._long_opts: - if opt in self._long_opt: - conflict_opts.append((opt, self._long_opt[opt])) - - if conflict_opts: - handler = self.conflict_handler - if handler == "error": - raise OptionConflictError( - "conflicting option string(s): %s" - % ", ".join([co[0] for co in conflict_opts]), - option) - elif handler == "resolve": - for (opt, c_option) in conflict_opts: - if opt.startswith("--"): - c_option._long_opts.remove(opt) - del self._long_opt[opt] - else: - c_option._short_opts.remove(opt) - del self._short_opt[opt] - if not (c_option._short_opts or c_option._long_opts): - c_option.container.option_list.remove(c_option) - - def add_option(self, *args, **kwargs): - """add_option(Option) - add_option(opt_str, ..., kwarg=val, ...) 
- """ - if type(args[0]) in types.StringTypes: - option = self.option_class(*args, **kwargs) - elif len(args) == 1 and not kwargs: - option = args[0] - if not isinstance(option, Option): - raise TypeError, "not an Option instance: %r" % option - else: - raise TypeError, "invalid arguments" - - self._check_conflict(option) - - self.option_list.append(option) - option.container = self - for opt in option._short_opts: - self._short_opt[opt] = option - for opt in option._long_opts: - self._long_opt[opt] = option - - if option.dest is not None: # option has a dest, we need a default - if option.default is not NO_DEFAULT: - self.defaults[option.dest] = option.default - elif option.dest not in self.defaults: - self.defaults[option.dest] = None - - return option - - def add_options(self, option_list): - for option in option_list: - self.add_option(option) - - # -- Option query/removal methods ---------------------------------- - - def get_option(self, opt_str): - return (self._short_opt.get(opt_str) or - self._long_opt.get(opt_str)) - - def has_option(self, opt_str): - return (opt_str in self._short_opt or - opt_str in self._long_opt) - - def remove_option(self, opt_str): - option = self._short_opt.get(opt_str) - if option is None: - option = self._long_opt.get(opt_str) - if option is None: - raise ValueError("no such option %r" % opt_str) - - for opt in option._short_opts: - del self._short_opt[opt] - for opt in option._long_opts: - del self._long_opt[opt] - option.container.option_list.remove(option) - - - # -- Help-formatting methods --------------------------------------- - - def format_option_help(self, formatter): - if not self.option_list: - return "" - result = [] - for option in self.option_list: - if not option.help is SUPPRESS_HELP: - result.append(formatter.format_option(option)) - return "".join(result) - - def format_description(self, formatter): - return formatter.format_description(self.get_description()) - - def format_help(self, formatter): - result = [] - if self.description: - result.append(self.format_description(formatter)) - if self.option_list: - result.append(self.format_option_help(formatter)) - return "\n".join(result) - - -class OptionGroup (OptionContainer): - - def __init__(self, parser, title, description=None): - self.parser = parser - OptionContainer.__init__( - self, parser.option_class, parser.conflict_handler, description) - self.title = title - - def _create_option_list(self): - self.option_list = [] - self._share_option_mappings(self.parser) - - def set_title(self, title): - self.title = title - - def destroy(self): - """see OptionParser.destroy().""" - OptionContainer.destroy(self) - del self.option_list - - # -- Help-formatting methods --------------------------------------- - - def format_help(self, formatter): - result = formatter.format_heading(self.title) - formatter.indent() - result += OptionContainer.format_help(self, formatter) - formatter.dedent() - return result - - -class OptionParser (OptionContainer): - - """ - Class attributes: - standard_option_list : [Option] - list of standard options that will be accepted by all instances - of this parser class (intended to be overridden by subclasses). - - Instance attributes: - usage : string - a usage string for your program. Before it is displayed - to the user, "%prog" will be expanded to the name of - your program (self.prog or os.path.basename(sys.argv[0])). - prog : string - the name of the current program (to override - os.path.basename(sys.argv[0])). 
- epilog : string - paragraph of help text to print after option help - - option_groups : [OptionGroup] - list of option groups in this parser (option groups are - irrelevant for parsing the command-line, but very useful - for generating help) - - allow_interspersed_args : bool = true - if true, positional arguments may be interspersed with options. - Assuming -a and -b each take a single argument, the command-line - -ablah foo bar -bboo baz - will be interpreted the same as - -ablah -bboo -- foo bar baz - If this flag were false, that command line would be interpreted as - -ablah -- foo bar -bboo baz - -- ie. we stop processing options as soon as we see the first - non-option argument. (This is the tradition followed by - Python's getopt module, Perl's Getopt::Std, and other argument- - parsing libraries, but it is generally annoying to users.) - - process_default_values : bool = true - if true, option default values are processed similarly to option - values from the command line: that is, they are passed to the - type-checking function for the option's type (as long as the - default value is a string). (This really only matters if you - have defined custom types; see SF bug #955889.) Set it to false - to restore the behaviour of Optik 1.4.1 and earlier. - - rargs : [string] - the argument list currently being parsed. Only set when - parse_args() is active, and continually trimmed down as - we consume arguments. Mainly there for the benefit of - callback options. - largs : [string] - the list of leftover arguments that we have skipped while - parsing options. If allow_interspersed_args is false, this - list is always empty. - values : Values - the set of option values currently being accumulated. Only - set when parse_args() is active. Also mainly for callbacks. - - Because of the 'rargs', 'largs', and 'values' attributes, - OptionParser is not thread-safe. If, for some perverse reason, you - need to parse command-line arguments simultaneously in different - threads, use different OptionParser instances. - - """ - - standard_option_list = [] - - def __init__(self, - usage=None, - option_list=None, - option_class=Option, - version=None, - conflict_handler="error", - description=None, - formatter=None, - add_help_option=True, - prog=None, - epilog=None): - OptionContainer.__init__( - self, option_class, conflict_handler, description) - self.set_usage(usage) - self.prog = prog - self.version = version - self.allow_interspersed_args = True - self.process_default_values = True - if formatter is None: - formatter = IndentedHelpFormatter() - self.formatter = formatter - self.formatter.set_parser(self) - self.epilog = epilog - - # Populate the option list; initial sources are the - # standard_option_list class attribute, the 'option_list' - # argument, and (if applicable) the _add_version_option() and - # _add_help_option() methods. - self._populate_option_list(option_list, - add_help=add_help_option) - - self._init_parsing_state() - - - def destroy(self): - """ - Declare that you are done with this OptionParser. This cleans up - reference cycles so the OptionParser (and all objects referenced by - it) can be garbage-collected promptly. After calling destroy(), the - OptionParser is unusable. 
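The allow_interspersed_args behaviour documented above can be toggled per instance; turning it off makes the parser stop at the first positional argument, which is what you want when forwarding the remainder to a sub-command. A sketch (option and arguments invented for illustration):

    from optparse import OptionParser

    parser = OptionParser()
    parser.add_option('-v', action='store_true', dest='verbose')
    parser.disable_interspersed_args()
    # Parsing stops at "run", so "-v" is left for the sub-command
    # instead of being consumed here.
    (options, args) = parser.parse_args(['run', '-v'])
    assert args == ['run', '-v'] and options.verbose is None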
- """ - OptionContainer.destroy(self) - for group in self.option_groups: - group.destroy() - del self.option_list - del self.option_groups - del self.formatter - - - # -- Private methods ----------------------------------------------- - # (used by our or OptionContainer's constructor) - - def _create_option_list(self): - self.option_list = [] - self.option_groups = [] - self._create_option_mappings() - - def _add_help_option(self): - self.add_option("-h", "--help", - action="help", - help=_("show this help message and exit")) - - def _add_version_option(self): - self.add_option("--version", - action="version", - help=_("show program's version number and exit")) - - def _populate_option_list(self, option_list, add_help=True): - if self.standard_option_list: - self.add_options(self.standard_option_list) - if option_list: - self.add_options(option_list) - if self.version: - self._add_version_option() - if add_help: - self._add_help_option() - - def _init_parsing_state(self): - # These are set in parse_args() for the convenience of callbacks. - self.rargs = None - self.largs = None - self.values = None - - - # -- Simple modifier methods --------------------------------------- - - def set_usage(self, usage): - if usage is None: - self.usage = _("%prog [options]") - elif usage is SUPPRESS_USAGE: - self.usage = None - # For backwards compatibility with Optik 1.3 and earlier. - elif usage.lower().startswith("usage: "): - self.usage = usage[7:] - else: - self.usage = usage - - def enable_interspersed_args(self): - """Set parsing to not stop on the first non-option, allowing - interspersing switches with command arguments. This is the - default behavior. See also disable_interspersed_args() and the - class documentation description of the attribute - allow_interspersed_args.""" - self.allow_interspersed_args = True - - def disable_interspersed_args(self): - """Set parsing to stop on the first non-option. Use this if - you have a command processor which runs another command that - has options of its own and you want to make sure these options - don't get confused. - """ - self.allow_interspersed_args = False - - def set_process_default_values(self, process): - self.process_default_values = process - - def set_default(self, dest, value): - self.defaults[dest] = value - - def set_defaults(self, **kwargs): - self.defaults.update(kwargs) - - def _get_all_options(self): - options = self.option_list[:] - for group in self.option_groups: - options.extend(group.option_list) - return options - - def get_default_values(self): - if not self.process_default_values: - # Old, pre-Optik 1.5 behaviour. 
- return Values(self.defaults) - - defaults = self.defaults.copy() - for option in self._get_all_options(): - default = defaults.get(option.dest) - if isbasestring(default): - opt_str = option.get_opt_string() - defaults[option.dest] = option.check_value(opt_str, default) - - return Values(defaults) - - - # -- OptionGroup methods ------------------------------------------- - - def add_option_group(self, *args, **kwargs): - # XXX lots of overlap with OptionContainer.add_option() - if type(args[0]) is types.StringType: - group = OptionGroup(self, *args, **kwargs) - elif len(args) == 1 and not kwargs: - group = args[0] - if not isinstance(group, OptionGroup): - raise TypeError, "not an OptionGroup instance: %r" % group - if group.parser is not self: - raise ValueError, "invalid OptionGroup (wrong parser)" - else: - raise TypeError, "invalid arguments" - - self.option_groups.append(group) - return group - - def get_option_group(self, opt_str): - option = (self._short_opt.get(opt_str) or - self._long_opt.get(opt_str)) - if option and option.container is not self: - return option.container - return None - - - # -- Option-parsing methods ---------------------------------------- - - def _get_args(self, args): - if args is None: - return sys.argv[1:] - else: - return args[:] # don't modify caller's list - - def parse_args(self, args=None, values=None): - """ - parse_args(args : [string] = sys.argv[1:], - values : Values = None) - -> (values : Values, args : [string]) - - Parse the command-line options found in 'args' (default: - sys.argv[1:]). Any errors result in a call to 'error()', which - by default prints the usage message to stderr and calls - sys.exit() with an error message. On success returns a pair - (values, args) where 'values' is an Values instance (with all - your option values) and 'args' is the list of arguments left - over after parsing options. - """ - rargs = self._get_args(args) - if values is None: - values = self.get_default_values() - - # Store the halves of the argument list as attributes for the - # convenience of callbacks: - # rargs - # the rest of the command-line (the "r" stands for - # "remaining" or "right-hand") - # largs - # the leftover arguments -- ie. what's left after removing - # options and their arguments (the "l" stands for "leftover" - # or "left-hand") - self.rargs = rargs - self.largs = largs = [] - self.values = values - - try: - stop = self._process_args(largs, rargs, values) - except (BadOptionError, OptionValueError), err: - self.error(str(err)) - - args = largs + rargs - return self.check_values(values, args) - - def check_values(self, values, args): - """ - check_values(values : Values, args : [string]) - -> (values : Values, args : [string]) - - Check that the supplied option values and leftover arguments are - valid. Returns the option values and leftover arguments - (possibly adjusted, possibly completely new -- whatever you - like). Default implementation just returns the passed-in - values; subclasses may override as desired. - """ - return (values, args) - - def _process_args(self, largs, rargs, values): - """_process_args(largs : [string], - rargs : [string], - values : Values) - - Process command-line arguments and populate 'values', consuming - options and arguments from 'rargs'. If 'allow_interspersed_args' is - false, stop at the first non-option argument. If true, accumulate any - interspersed non-option arguments in 'largs'. 
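parse_args() as documented above returns the (values, leftover arguments) pair, with string defaults run through the type checkers first by get_default_values() when process_default_values is true. A minimal round trip (option names invented for illustration):

    from optparse import OptionParser

    parser = OptionParser()
    parser.add_option('-n', '--count', type='int', dest='count', default=1)
    (options, args) = parser.parse_args(['-n', '10', 'leftover'])
    assert options.count == 10      # converted by the "int" checker
    assert args == ['leftover']     # non-option arguments left over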
- """ - while rargs: - arg = rargs[0] - # We handle bare "--" explicitly, and bare "-" is handled by the - # standard arg handler since the short arg case ensures that the - # len of the opt string is greater than 1. - if arg == "--": - del rargs[0] - return - elif arg[0:2] == "--": - # process a single long option (possibly with value(s)) - self._process_long_opt(rargs, values) - elif arg[:1] == "-" and len(arg) > 1: - # process a cluster of short options (possibly with - # value(s) for the last one only) - self._process_short_opts(rargs, values) - elif self.allow_interspersed_args: - largs.append(arg) - del rargs[0] - else: - return # stop now, leave this arg in rargs - - # Say this is the original argument list: - # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)] - # ^ - # (we are about to process arg(i)). - # - # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of - # [arg0, ..., arg(i-1)] (any options and their arguments will have - # been removed from largs). - # - # The while loop will usually consume 1 or more arguments per pass. - # If it consumes 1 (eg. arg is an option that takes no arguments), - # then after _process_arg() is done the situation is: - # - # largs = subset of [arg0, ..., arg(i)] - # rargs = [arg(i+1), ..., arg(N-1)] - # - # If allow_interspersed_args is false, largs will always be - # *empty* -- still a subset of [arg0, ..., arg(i-1)], but - # not a very interesting subset! - - def _match_long_opt(self, opt): - """_match_long_opt(opt : string) -> string - - Determine which long option string 'opt' matches, ie. which one - it is an unambiguous abbrevation for. Raises BadOptionError if - 'opt' doesn't unambiguously match any long option string. - """ - return _match_abbrev(opt, self._long_opt) - - def _process_long_opt(self, rargs, values): - arg = rargs.pop(0) - - # Value explicitly attached to arg? Pretend it's the next - # argument. - if "=" in arg: - (opt, next_arg) = arg.split("=", 1) - rargs.insert(0, next_arg) - had_explicit_value = True - else: - opt = arg - had_explicit_value = False - - opt = self._match_long_opt(opt) - option = self._long_opt[opt] - if option.takes_value(): - nargs = option.nargs - if len(rargs) < nargs: - if nargs == 1: - self.error(_("%s option requires an argument") % opt) - else: - self.error(_("%s option requires %d arguments") - % (opt, nargs)) - elif nargs == 1: - value = rargs.pop(0) - else: - value = tuple(rargs[0:nargs]) - del rargs[0:nargs] - - elif had_explicit_value: - self.error(_("%s option does not take a value") % opt) - - else: - value = None - - option.process(opt, value, values, self) - - def _process_short_opts(self, rargs, values): - arg = rargs.pop(0) - stop = False - i = 1 - for ch in arg[1:]: - opt = "-" + ch - option = self._short_opt.get(opt) - i += 1 # we have consumed a character - - if not option: - raise BadOptionError(opt) - if option.takes_value(): - # Any characters left in arg? Pretend they're the - # next arg, and stop consuming characters of arg. 
- if i < len(arg): - rargs.insert(0, arg[i:]) - stop = True - - nargs = option.nargs - if len(rargs) < nargs: - if nargs == 1: - self.error(_("%s option requires an argument") % opt) - else: - self.error(_("%s option requires %d arguments") - % (opt, nargs)) - elif nargs == 1: - value = rargs.pop(0) - else: - value = tuple(rargs[0:nargs]) - del rargs[0:nargs] - - else: # option doesn't take a value - value = None - - option.process(opt, value, values, self) - - if stop: - break - - - # -- Feedback methods ---------------------------------------------- - - def get_prog_name(self): - if self.prog is None: - return os.path.basename(sys.argv[0]) - else: - return self.prog - - def expand_prog_name(self, s): - return s.replace("%prog", self.get_prog_name()) - - def get_description(self): - return self.expand_prog_name(self.description) - - def exit(self, status=0, msg=None): - if msg: - sys.stderr.write(msg) - sys.exit(status) - - def error(self, msg): - """error(msg : string) - - Print a usage message incorporating 'msg' to stderr and exit. - If you override this in a subclass, it should not return -- it - should either exit or raise an exception. - """ - self.print_usage(sys.stderr) - self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg)) - - def get_usage(self): - if self.usage: - return self.formatter.format_usage( - self.expand_prog_name(self.usage)) - else: - return "" - - def print_usage(self, file=None): - """print_usage(file : file = stdout) - - Print the usage message for the current program (self.usage) to - 'file' (default stdout). Any occurrence of the string "%prog" in - self.usage is replaced with the name of the current program - (basename of sys.argv[0]). Does nothing if self.usage is empty - or not defined. - """ - if self.usage: - print >>file, self.get_usage() - - def get_version(self): - if self.version: - return self.expand_prog_name(self.version) - else: - return "" - - def print_version(self, file=None): - """print_version(file : file = stdout) - - Print the version message for this program (self.version) to - 'file' (default stdout). As with print_usage(), any occurrence - of "%prog" in self.version is replaced by the current program's - name. Does nothing if self.version is empty or undefined. 
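error() as documented above must never return. A common pattern when embedding the parser in a long-running process is to override it to raise instead of calling sys.exit(); a sketch:

    from optparse import OptionParser

    class RaisingOptionParser(OptionParser):
        def error(self, msg):
            # Honour the contract above: report and do not return.
            raise ValueError('%s: error: %s' % (self.get_prog_name(), msg))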
- """ - if self.version: - print >>file, self.get_version() - - def format_option_help(self, formatter=None): - if formatter is None: - formatter = self.formatter - formatter.store_option_strings(self) - result = [] - result.append(formatter.format_heading(_("Options"))) - formatter.indent() - if self.option_list: - result.append(OptionContainer.format_option_help(self, formatter)) - result.append("\n") - for group in self.option_groups: - result.append(group.format_help(formatter)) - result.append("\n") - formatter.dedent() - # Drop the last "\n", or the header if no options or option groups: - return "".join(result[:-1]) - - def format_epilog(self, formatter): - return formatter.format_epilog(self.epilog) - - def format_help(self, formatter=None): - if formatter is None: - formatter = self.formatter - result = [] - if self.usage: - result.append(self.get_usage() + "\n") - if self.description: - result.append(self.format_description(formatter) + "\n") - result.append(self.format_option_help(formatter)) - result.append(self.format_epilog(formatter)) - return "".join(result) - - # used by test suite - def _get_encoding(self, file): - encoding = getattr(file, "encoding", None) - if not encoding: - encoding = sys.getdefaultencoding() - return encoding - - def print_help(self, file=None): - """print_help(file : file = stdout) - - Print an extended help message, listing all options and any - help text provided with them, to 'file' (default stdout). - """ - if file is None: - file = sys.stdout - encoding = self._get_encoding(file) - file.write(self.format_help().encode(encoding, "replace")) - -# class OptionParser - - -def _match_abbrev(s, wordmap): - """_match_abbrev(s : string, wordmap : {string : Option}) -> string - - Return the string key in 'wordmap' for which 's' is an unambiguous - abbreviation. If 's' is found to be ambiguous or doesn't match any of - 'words', raise BadOptionError. - """ - # Is there an exact match? - if s in wordmap: - return s - else: - # Isolate all words with s as a prefix. - possibilities = [word for word in wordmap.keys() - if word.startswith(s)] - # No exact match, so there had better be just one possibility. - if len(possibilities) == 1: - return possibilities[0] - elif not possibilities: - raise BadOptionError(s) - else: - # More than one possible completion: ambiguous prefix. - possibilities.sort() - raise AmbiguousOptionError(s, possibilities) - - -# Some day, there might be many Option classes. As of Optik 1.3, the -# preferred way to instantiate Options is indirectly, via make_option(), -# which will become a factory function when there are many Option -# classes. -make_option = Option diff --git a/icarus-miner/data/usr/lib/python2.6/optparse.pyc b/icarus-miner/data/usr/lib/python2.6/optparse.pyc deleted file mode 100644 index a0b9a64..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/optparse.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/os.py b/icarus-miner/data/usr/lib/python2.6/os.py deleted file mode 100644 index 94476a3..0000000 --- a/icarus-miner/data/usr/lib/python2.6/os.py +++ /dev/null @@ -1,763 +0,0 @@ -r"""OS routines for Mac, NT, or Posix depending on what system we're on. - -This exports: - - all functions from posix, nt, os2, or ce, e.g. unlink, stat, etc. - - os.path is one of the modules posixpath, or ntpath - - os.name is 'posix', 'nt', 'os2', 'ce' or 'riscos' - - os.curdir is a string representing the current directory ('.' or ':') - - os.pardir is a string representing the parent directory ('..' 
or '::') - - os.sep is the (or a most common) pathname separator ('/' or ':' or '\\') - - os.extsep is the extension separator ('.' or '/') - - os.altsep is the alternate pathname separator (None or '/') - - os.pathsep is the component separator used in $PATH etc - - os.linesep is the line separator in text files ('\r' or '\n' or '\r\n') - - os.defpath is the default search path for executables - - os.devnull is the file path of the null device ('/dev/null', etc.) - -Programs that import and use 'os' stand a better chance of being -portable between different platforms. Of course, they must then -only use functions that are defined by all platforms (e.g., unlink -and opendir), and leave all pathname manipulation to os.path -(e.g., split and join). -""" - -#' - -import sys, errno - -_names = sys.builtin_module_names - -# Note: more names are added to __all__ later. -__all__ = ["altsep", "curdir", "pardir", "sep", "extsep", "pathsep", "linesep", - "defpath", "name", "path", "devnull", - "SEEK_SET", "SEEK_CUR", "SEEK_END"] - -def _get_exports_list(module): - try: - return list(module.__all__) - except AttributeError: - return [n for n in dir(module) if n[0] != '_'] - -if 'posix' in _names: - name = 'posix' - linesep = '\n' - from posix import * - try: - from posix import _exit - except ImportError: - pass - import posixpath as path - - import posix - __all__.extend(_get_exports_list(posix)) - del posix - -elif 'nt' in _names: - name = 'nt' - linesep = '\r\n' - from nt import * - try: - from nt import _exit - except ImportError: - pass - import ntpath as path - - import nt - __all__.extend(_get_exports_list(nt)) - del nt - -elif 'os2' in _names: - name = 'os2' - linesep = '\r\n' - from os2 import * - try: - from os2 import _exit - except ImportError: - pass - if sys.version.find('EMX GCC') == -1: - import ntpath as path - else: - import os2emxpath as path - from _emx_link import link - - import os2 - __all__.extend(_get_exports_list(os2)) - del os2 - -elif 'ce' in _names: - name = 'ce' - linesep = '\r\n' - from ce import * - try: - from ce import _exit - except ImportError: - pass - # We can use the standard Windows path. - import ntpath as path - - import ce - __all__.extend(_get_exports_list(ce)) - del ce - -elif 'riscos' in _names: - name = 'riscos' - linesep = '\n' - from riscos import * - try: - from riscos import _exit - except ImportError: - pass - import riscospath as path - - import riscos - __all__.extend(_get_exports_list(riscos)) - del riscos - -else: - raise ImportError, 'no os specific module found' - -sys.modules['os.path'] = path -from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep, - devnull) - -del _names - -# Python uses fixed values for the SEEK_ constants; they are mapped -# to native constants if necessary in posixmodule.c -SEEK_SET = 0 -SEEK_CUR = 1 -SEEK_END = 2 - -#' - -# Super directory utilities. -# (Inspired by Eric Raymond; the doc strings are mostly his) - -def makedirs(name, mode=0777): - """makedirs(path [, mode=0777]) - - Super-mkdir; create a leaf directory and all intermediate ones. - Works like mkdir, except that any intermediate path segment (not - just the rightmost) will be created if it does not exist. This is - recursive. - - """ - head, tail = path.split(name) - if not tail: - head, tail = path.split(head) - if head and tail and not path.exists(head): - try: - makedirs(head, mode) - except OSError, e: - # be happy if someone already created the path - if e.errno != errno.EEXIST: - raise - if tail == curdir: # xxx/newdir/. 
exists if xxx/newdir exists - return - mkdir(name, mode) - -def removedirs(name): - """removedirs(path) - - Super-rmdir; remove a leaf directory and all empty intermediate - ones. Works like rmdir except that, if the leaf directory is - successfully removed, directories corresponding to rightmost path - segments will be pruned away until either the whole path is - consumed or an error occurs. Errors during this latter phase are - ignored -- they generally mean that a directory was not empty. - - """ - rmdir(name) - head, tail = path.split(name) - if not tail: - head, tail = path.split(head) - while head and tail: - try: - rmdir(head) - except error: - break - head, tail = path.split(head) - -def renames(old, new): - """renames(old, new) - - Super-rename; create directories as necessary and delete any left - empty. Works like rename, except creation of any intermediate - directories needed to make the new pathname good is attempted - first. After the rename, directories corresponding to rightmost - path segments of the old name will be pruned way until either the - whole path is consumed or a nonempty directory is found. - - Note: this function can fail with the new directory structure made - if you lack permissions needed to unlink the leaf directory or - file. - - """ - head, tail = path.split(new) - if head and tail and not path.exists(head): - makedirs(head) - rename(old, new) - head, tail = path.split(old) - if head and tail: - try: - removedirs(head) - except error: - pass - -__all__.extend(["makedirs", "removedirs", "renames"]) - -def walk(top, topdown=True, onerror=None, followlinks=False): - """Directory tree generator. - - For each directory in the directory tree rooted at top (including top - itself, but excluding '.' and '..'), yields a 3-tuple - - dirpath, dirnames, filenames - - dirpath is a string, the path to the directory. dirnames is a list of - the names of the subdirectories in dirpath (excluding '.' and '..'). - filenames is a list of the names of the non-directory files in dirpath. - Note that the names in the lists are just names, with no path components. - To get a full path (which begins with top) to a file or directory in - dirpath, do os.path.join(dirpath, name). - - If optional arg 'topdown' is true or not specified, the triple for a - directory is generated before the triples for any of its subdirectories - (directories are generated top down). If topdown is false, the triple - for a directory is generated after the triples for all of its - subdirectories (directories are generated bottom up). - - When topdown is true, the caller can modify the dirnames list in-place - (e.g., via del or slice assignment), and walk will only recurse into the - subdirectories whose names remain in dirnames; this can be used to prune - the search, or to impose a specific order of visiting. Modifying - dirnames when topdown is false is ineffective, since the directories in - dirnames have already been generated by the time dirnames itself is - generated. - - By default errors from the os.listdir() call are ignored. If - optional arg 'onerror' is specified, it should be a function; it - will be called with one argument, an os.error instance. It can - report the error to continue with the walk, or raise the exception - to abort the walk. Note that the filename is available as the - filename attribute of the exception object. - - By default, os.walk does not follow symbolic links to subdirectories on - systems that support them. 
In order to get this functionality, set the - optional argument 'followlinks' to true. - - Caution: if you pass a relative pathname for top, don't change the - current working directory between resumptions of walk. walk never - changes the current directory, and assumes that the client doesn't - either. - - Example: - - import os - from os.path import join, getsize - for root, dirs, files in os.walk('python/Lib/email'): - print root, "consumes", - print sum([getsize(join(root, name)) for name in files]), - print "bytes in", len(files), "non-directory files" - if 'CVS' in dirs: - dirs.remove('CVS') # don't visit CVS directories - """ - - from os.path import join, isdir, islink - - # We may not have read permission for top, in which case we can't - # get a list of the files the directory contains. os.path.walk - # always suppressed the exception then, rather than blow up for a - # minor reason when (say) a thousand readable directories are still - # left to visit. That logic is copied here. - try: - # Note that listdir and error are globals in this module due - # to earlier import-*. - names = listdir(top) - except error, err: - if onerror is not None: - onerror(err) - return - - dirs, nondirs = [], [] - for name in names: - if isdir(join(top, name)): - dirs.append(name) - else: - nondirs.append(name) - - if topdown: - yield top, dirs, nondirs - for name in dirs: - path = join(top, name) - if followlinks or not islink(path): - for x in walk(path, topdown, onerror, followlinks): - yield x - if not topdown: - yield top, dirs, nondirs - -__all__.append("walk") - -# Make sure os.environ exists, at least -try: - environ -except NameError: - environ = {} - -def execl(file, *args): - """execl(file, *args) - - Execute the executable file with argument list args, replacing the - current process. """ - execv(file, args) - -def execle(file, *args): - """execle(file, *args, env) - - Execute the executable file with argument list args and - environment env, replacing the current process. """ - env = args[-1] - execve(file, args[:-1], env) - -def execlp(file, *args): - """execlp(file, *args) - - Execute the executable file (which is searched for along $PATH) - with argument list args, replacing the current process. """ - execvp(file, args) - -def execlpe(file, *args): - """execlpe(file, *args, env) - - Execute the executable file (which is searched for along $PATH) - with argument list args and environment env, replacing the current - process. """ - env = args[-1] - execvpe(file, args[:-1], env) - -def execvp(file, args): - """execp(file, args) - - Execute the executable file (which is searched for along $PATH) - with argument list args, replacing the current process. - args may be a list or tuple of strings. """ - _execvpe(file, args) - -def execvpe(file, args, env): - """execvpe(file, args, env) - - Execute the executable file (which is searched for along $PATH) - with argument list args and environment env , replacing the - current process. - args may be a list or tuple of strings. 
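The exec* wrappers above differ only in how arguments and the environment are supplied; they all replace the current process image, so a caller that wants to keep running forks first. A small Unix-only sketch (the command is chosen arbitrarily):

    import os

    pid = os.fork()
    if pid == 0:
        # Child: search $PATH for "echo" and replace this process.
        os.execvp('echo', ['echo', 'hello from the child'])
    else:
        os.waitpid(pid, 0)  # parent: wait for the child to exit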
""" - _execvpe(file, args, env) - -__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"]) - -def _execvpe(file, args, env=None): - if env is not None: - func = execve - argrest = (args, env) - else: - func = execv - argrest = (args,) - env = environ - - head, tail = path.split(file) - if head: - func(file, *argrest) - return - if 'PATH' in env: - envpath = env['PATH'] - else: - envpath = defpath - PATH = envpath.split(pathsep) - saved_exc = None - saved_tb = None - for dir in PATH: - fullname = path.join(dir, file) - try: - func(fullname, *argrest) - except error, e: - tb = sys.exc_info()[2] - if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR - and saved_exc is None): - saved_exc = e - saved_tb = tb - if saved_exc: - raise error, saved_exc, saved_tb - raise error, e, tb - -# Change environ to automatically call putenv() if it exists -try: - # This will fail if there's no putenv - putenv -except NameError: - pass -else: - import UserDict - - # Fake unsetenv() for Windows - # not sure about os2 here but - # I'm guessing they are the same. - - if name in ('os2', 'nt'): - def unsetenv(key): - putenv(key, "") - - if name == "riscos": - # On RISC OS, all env access goes through getenv and putenv - from riscosenviron import _Environ - elif name in ('os2', 'nt'): # Where Env Var Names Must Be UPPERCASE - # But we store them as upper case - class _Environ(UserDict.IterableUserDict): - def __init__(self, environ): - UserDict.UserDict.__init__(self) - data = self.data - for k, v in environ.items(): - data[k.upper()] = v - def __setitem__(self, key, item): - putenv(key, item) - self.data[key.upper()] = item - def __getitem__(self, key): - return self.data[key.upper()] - try: - unsetenv - except NameError: - def __delitem__(self, key): - del self.data[key.upper()] - else: - def __delitem__(self, key): - unsetenv(key) - del self.data[key.upper()] - def clear(self): - for key in self.data.keys(): - unsetenv(key) - del self.data[key] - def pop(self, key, *args): - unsetenv(key) - return self.data.pop(key.upper(), *args) - def has_key(self, key): - return key.upper() in self.data - def __contains__(self, key): - return key.upper() in self.data - def get(self, key, failobj=None): - return self.data.get(key.upper(), failobj) - def update(self, dict=None, **kwargs): - if dict: - try: - keys = dict.keys() - except AttributeError: - # List of (key, value) - for k, v in dict: - self[k] = v - else: - # got keys - # cannot use items(), since mappings - # may not have them. - for k in keys: - self[k] = dict[k] - if kwargs: - self.update(kwargs) - def copy(self): - return dict(self) - - else: # Where Env Var Names Can Be Mixed Case - class _Environ(UserDict.IterableUserDict): - def __init__(self, environ): - UserDict.UserDict.__init__(self) - self.data = environ - def __setitem__(self, key, item): - putenv(key, item) - self.data[key] = item - def update(self, dict=None, **kwargs): - if dict: - try: - keys = dict.keys() - except AttributeError: - # List of (key, value) - for k, v in dict: - self[k] = v - else: - # got keys - # cannot use items(), since mappings - # may not have them. 
- for k in keys: - self[k] = dict[k] - if kwargs: - self.update(kwargs) - try: - unsetenv - except NameError: - pass - else: - def __delitem__(self, key): - unsetenv(key) - del self.data[key] - def clear(self): - for key in self.data.keys(): - unsetenv(key) - del self.data[key] - def pop(self, key, *args): - unsetenv(key) - return self.data.pop(key, *args) - def copy(self): - return dict(self) - - - environ = _Environ(environ) - -def getenv(key, default=None): - """Get an environment variable, return None if it doesn't exist. - The optional second argument can specify an alternate default.""" - return environ.get(key, default) -__all__.append("getenv") - -def _exists(name): - try: - eval(name) - return True - except NameError: - return False - -# Supply spawn*() (probably only for Unix) -if _exists("fork") and not _exists("spawnv") and _exists("execv"): - - P_WAIT = 0 - P_NOWAIT = P_NOWAITO = 1 - - # XXX Should we support P_DETACH? I suppose it could fork()**2 - # and close the std I/O streams. Also, P_OVERLAY is the same - # as execv*()? - - def _spawnvef(mode, file, args, env, func): - # Internal helper; func is the exec*() function to use - pid = fork() - if not pid: - # Child - try: - if env is None: - func(file, args) - else: - func(file, args, env) - except: - _exit(127) - else: - # Parent - if mode == P_NOWAIT: - return pid # Caller is responsible for waiting! - while 1: - wpid, sts = waitpid(pid, 0) - if WIFSTOPPED(sts): - continue - elif WIFSIGNALED(sts): - return -WTERMSIG(sts) - elif WIFEXITED(sts): - return WEXITSTATUS(sts) - else: - raise error, "Not stopped, signaled or exited???" - - def spawnv(mode, file, args): - """spawnv(mode, file, args) -> integer - -Execute file with arguments from args in a subprocess. -If mode == P_NOWAIT return the pid of the process. -If mode == P_WAIT return the process's exit code if it exits normally; -otherwise return -SIG, where SIG is the signal that killed it. """ - return _spawnvef(mode, file, args, None, execv) - - def spawnve(mode, file, args, env): - """spawnve(mode, file, args, env) -> integer - -Execute file with arguments from args in a subprocess with the -specified environment. -If mode == P_NOWAIT return the pid of the process. -If mode == P_WAIT return the process's exit code if it exits normally; -otherwise return -SIG, where SIG is the signal that killed it. """ - return _spawnvef(mode, file, args, env, execve) - - # Note: spawnvp[e] is't currently supported on Windows - - def spawnvp(mode, file, args): - """spawnvp(mode, file, args) -> integer - -Execute file (which is looked for along $PATH) with arguments from -args in a subprocess. -If mode == P_NOWAIT return the pid of the process. -If mode == P_WAIT return the process's exit code if it exits normally; -otherwise return -SIG, where SIG is the signal that killed it. """ - return _spawnvef(mode, file, args, None, execvp) - - def spawnvpe(mode, file, args, env): - """spawnvpe(mode, file, args, env) -> integer - -Execute file (which is looked for along $PATH) with arguments from -args in a subprocess with the supplied environment. -If mode == P_NOWAIT return the pid of the process. -If mode == P_WAIT return the process's exit code if it exits normally; -otherwise return -SIG, where SIG is the signal that killed it. 
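All of the spawn* variants funnel into _spawnvef above, so the mode argument behaves uniformly across them. A hedged sketch of the two modes ("true" and "sleep" are arbitrary example commands on a POSIX host):

# Sketch: P_WAIT blocks and returns the exit code; P_NOWAIT returns the pid.
import os

rc = os.spawnvp(os.P_WAIT, "true", ["true"])            # waits; 0 on success
pid = os.spawnvp(os.P_NOWAIT, "sleep", ["sleep", "1"])
pid, status = os.waitpid(pid, 0)    # caller must reap a P_NOWAIT child
print rc, os.WEXITSTATUS(status)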
""" - return _spawnvef(mode, file, args, env, execvpe) - -if _exists("spawnv"): - # These aren't supplied by the basic Windows code - # but can be easily implemented in Python - - def spawnl(mode, file, *args): - """spawnl(mode, file, *args) -> integer - -Execute file with arguments from args in a subprocess. -If mode == P_NOWAIT return the pid of the process. -If mode == P_WAIT return the process's exit code if it exits normally; -otherwise return -SIG, where SIG is the signal that killed it. """ - return spawnv(mode, file, args) - - def spawnle(mode, file, *args): - """spawnle(mode, file, *args, env) -> integer - -Execute file with arguments from args in a subprocess with the -supplied environment. -If mode == P_NOWAIT return the pid of the process. -If mode == P_WAIT return the process's exit code if it exits normally; -otherwise return -SIG, where SIG is the signal that killed it. """ - env = args[-1] - return spawnve(mode, file, args[:-1], env) - - - __all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",]) - - -if _exists("spawnvp"): - # At the moment, Windows doesn't implement spawnvp[e], - # so it won't have spawnlp[e] either. - def spawnlp(mode, file, *args): - """spawnlp(mode, file, *args) -> integer - -Execute file (which is looked for along $PATH) with arguments from -args in a subprocess with the supplied environment. -If mode == P_NOWAIT return the pid of the process. -If mode == P_WAIT return the process's exit code if it exits normally; -otherwise return -SIG, where SIG is the signal that killed it. """ - return spawnvp(mode, file, args) - - def spawnlpe(mode, file, *args): - """spawnlpe(mode, file, *args, env) -> integer - -Execute file (which is looked for along $PATH) with arguments from -args in a subprocess with the supplied environment. -If mode == P_NOWAIT return the pid of the process. -If mode == P_WAIT return the process's exit code if it exits normally; -otherwise return -SIG, where SIG is the signal that killed it. """ - env = args[-1] - return spawnvpe(mode, file, args[:-1], env) - - - __all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",]) - - -# Supply popen2 etc. (for Unix) -if _exists("fork"): - if not _exists("popen2"): - def popen2(cmd, mode="t", bufsize=-1): - """Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd' - may be a sequence, in which case arguments will be passed directly to - the program without shell intervention (as with os.spawnv()). If 'cmd' - is a string it will be passed to the shell (as with os.system()). If - 'bufsize' is specified, it sets the buffer size for the I/O pipes. The - file objects (child_stdin, child_stdout) are returned.""" - import warnings - msg = "os.popen2 is deprecated. Use the subprocess module." - warnings.warn(msg, DeprecationWarning, stacklevel=2) - - import subprocess - PIPE = subprocess.PIPE - p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring), - bufsize=bufsize, stdin=PIPE, stdout=PIPE, - close_fds=True) - return p.stdin, p.stdout - __all__.append("popen2") - - if not _exists("popen3"): - def popen3(cmd, mode="t", bufsize=-1): - """Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd' - may be a sequence, in which case arguments will be passed directly to - the program without shell intervention (as with os.spawnv()). If 'cmd' - is a string it will be passed to the shell (as with os.system()). If - 'bufsize' is specified, it sets the buffer size for the I/O pipes. 
The - file objects (child_stdin, child_stdout, child_stderr) are returned.""" - import warnings - msg = "os.popen3 is deprecated. Use the subprocess module." - warnings.warn(msg, DeprecationWarning, stacklevel=2) - - import subprocess - PIPE = subprocess.PIPE - p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring), - bufsize=bufsize, stdin=PIPE, stdout=PIPE, - stderr=PIPE, close_fds=True) - return p.stdin, p.stdout, p.stderr - __all__.append("popen3") - - if not _exists("popen4"): - def popen4(cmd, mode="t", bufsize=-1): - """Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd' - may be a sequence, in which case arguments will be passed directly to - the program without shell intervention (as with os.spawnv()). If 'cmd' - is a string it will be passed to the shell (as with os.system()). If - 'bufsize' is specified, it sets the buffer size for the I/O pipes. The - file objects (child_stdin, child_stdout_stderr) are returned.""" - import warnings - msg = "os.popen4 is deprecated. Use the subprocess module." - warnings.warn(msg, DeprecationWarning, stacklevel=2) - - import subprocess - PIPE = subprocess.PIPE - p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring), - bufsize=bufsize, stdin=PIPE, stdout=PIPE, - stderr=subprocess.STDOUT, close_fds=True) - return p.stdin, p.stdout - __all__.append("popen4") - -import copy_reg as _copy_reg - -def _make_stat_result(tup, dict): - return stat_result(tup, dict) - -def _pickle_stat_result(sr): - (type, args) = sr.__reduce__() - return (_make_stat_result, args) - -try: - _copy_reg.pickle(stat_result, _pickle_stat_result, _make_stat_result) -except NameError: # stat_result may not exist - pass - -def _make_statvfs_result(tup, dict): - return statvfs_result(tup, dict) - -def _pickle_statvfs_result(sr): - (type, args) = sr.__reduce__() - return (_make_statvfs_result, args) - -try: - _copy_reg.pickle(statvfs_result, _pickle_statvfs_result, - _make_statvfs_result) -except NameError: # statvfs_result may not exist - pass - -if not _exists("urandom"): - def urandom(n): - """urandom(n) -> str - - Return a string of n random bytes suitable for cryptographic use. - - """ - try: - _urandomfd = open("/dev/urandom", O_RDONLY) - except (OSError, IOError): - raise NotImplementedError("/dev/urandom (or equivalent) not found") - try: - bs = b"" - while n - len(bs) >= 1: - bs += read(_urandomfd, n - len(bs)) - finally: - close(_urandomfd) - return bs diff --git a/icarus-miner/data/usr/lib/python2.6/os.pyc b/icarus-miner/data/usr/lib/python2.6/os.pyc deleted file mode 100644 index 8eddbe6..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/os.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/posixpath.py b/icarus-miner/data/usr/lib/python2.6/posixpath.py deleted file mode 100644 index cdce5af..0000000 --- a/icarus-miner/data/usr/lib/python2.6/posixpath.py +++ /dev/null @@ -1,405 +0,0 @@ -"""Common operations on Posix pathnames. - -Instead of importing this module directly, import os and refer to -this module as os.path. The "os.path" name is an alias for this -module on Posix systems; on other systems (e.g. Mac, Windows), -os.path provides the same operations in a manner specific to that -platform, and is an alias to another module (e.g. macpath, ntpath). - -Some of this can actually be useful on non-Posix systems too, e.g. -for manipulation of the pathname component of URLs. 
-""" - -import os -import stat -import genericpath -import warnings -from genericpath import * - -__all__ = ["normcase","isabs","join","splitdrive","split","splitext", - "basename","dirname","commonprefix","getsize","getmtime", - "getatime","getctime","islink","exists","lexists","isdir","isfile", - "ismount","walk","expanduser","expandvars","normpath","abspath", - "samefile","sameopenfile","samestat", - "curdir","pardir","sep","pathsep","defpath","altsep","extsep", - "devnull","realpath","supports_unicode_filenames","relpath"] - -# strings representing various path-related bits and pieces -curdir = '.' -pardir = '..' -extsep = '.' -sep = '/' -pathsep = ':' -defpath = ':/bin:/usr/bin' -altsep = None -devnull = '/dev/null' - -# Normalize the case of a pathname. Trivial in Posix, string.lower on Mac. -# On MS-DOS this may also turn slashes into backslashes; however, other -# normalizations (such as optimizing '../' away) are not allowed -# (another function should be defined to do that). - -def normcase(s): - """Normalize case of pathname. Has no effect under Posix""" - return s - - -# Return whether a path is absolute. -# Trivial in Posix, harder on the Mac or MS-DOS. - -def isabs(s): - """Test whether a path is absolute""" - return s.startswith('/') - - -# Join pathnames. -# Ignore the previous parts if a part is absolute. -# Insert a '/' unless the first part is empty or already ends in '/'. - -def join(a, *p): - """Join two or more pathname components, inserting '/' as needed. - If any component is an absolute path, all previous path components - will be discarded.""" - path = a - for b in p: - if b.startswith('/'): - path = b - elif path == '' or path.endswith('/'): - path += b - else: - path += '/' + b - return path - - -# Split a path in head (everything up to the last '/') and tail (the -# rest). If the path ends in '/', tail will be empty. If there is no -# '/' in the path, head will be empty. -# Trailing '/'es are stripped from head unless it is the root. - -def split(p): - """Split a pathname. Returns tuple "(head, tail)" where "tail" is - everything after the final slash. Either part may be empty.""" - i = p.rfind('/') + 1 - head, tail = p[:i], p[i:] - if head and head != '/'*len(head): - head = head.rstrip('/') - return head, tail - - -# Split a path in root and extension. -# The extension is everything starting at the last dot in the last -# pathname component; the root is everything before that. -# It is always true that root + ext == p. - -def splitext(p): - return genericpath._splitext(p, sep, altsep, extsep) -splitext.__doc__ = genericpath._splitext.__doc__ - -# Split a pathname into a drive specification and the rest of the -# path. Useful on DOS/Windows/NT; on Unix, the drive is always empty. - -def splitdrive(p): - """Split a pathname into drive and path. On Posix, drive is always - empty.""" - return '', p - - -# Return the tail (basename) part of a path, same as split(path)[1]. - -def basename(p): - """Returns the final component of a pathname""" - i = p.rfind('/') + 1 - return p[i:] - - -# Return the head (dirname) part of a path, same as split(path)[0]. - -def dirname(p): - """Returns the directory component of a pathname""" - i = p.rfind('/') + 1 - head = p[:i] - if head and head != '/'*len(head): - head = head.rstrip('/') - return head - - -# Is a path a symbolic link? -# This will always return false on systems where os.lstat doesn't exist. 
- -def islink(path): - """Test whether a path is a symbolic link""" - try: - st = os.lstat(path) - except (os.error, AttributeError): - return False - return stat.S_ISLNK(st.st_mode) - -# Being true for dangling symbolic links is also useful. - -def lexists(path): - """Test whether a path exists. Returns True for broken symbolic links""" - try: - st = os.lstat(path) - except os.error: - return False - return True - - -# Are two filenames really pointing to the same file? - -def samefile(f1, f2): - """Test whether two pathnames reference the same actual file""" - s1 = os.stat(f1) - s2 = os.stat(f2) - return samestat(s1, s2) - - -# Are two open files really referencing the same file? -# (Not necessarily the same file descriptor!) - -def sameopenfile(fp1, fp2): - """Test whether two open file objects reference the same file""" - s1 = os.fstat(fp1) - s2 = os.fstat(fp2) - return samestat(s1, s2) - - -# Are two stat buffers (obtained from stat, fstat or lstat) -# describing the same file? - -def samestat(s1, s2): - """Test whether two stat buffers reference the same file""" - return s1.st_ino == s2.st_ino and \ - s1.st_dev == s2.st_dev - - -# Is a path a mount point? -# (Does this work for all UNIXes? Is it even guaranteed to work by Posix?) - -def ismount(path): - """Test whether a path is a mount point""" - try: - s1 = os.lstat(path) - s2 = os.lstat(join(path, '..')) - except os.error: - return False # It doesn't exist -- so not a mount point :-) - dev1 = s1.st_dev - dev2 = s2.st_dev - if dev1 != dev2: - return True # path/.. on a different device as path - ino1 = s1.st_ino - ino2 = s2.st_ino - if ino1 == ino2: - return True # path/.. is the same i-node as path - return False - - -# Directory tree walk. -# For each directory under top (including top itself, but excluding -# '.' and '..'), func(arg, dirname, filenames) is called, where -# dirname is the name of the directory and filenames is the list -# of files (and subdirectories etc.) in the directory. -# The func may modify the filenames list, to implement a filter, -# or to impose a different order of visiting. - -def walk(top, func, arg): - """Directory tree walk with callback function. - - For each directory in the directory tree rooted at top (including top - itself, but excluding '.' and '..'), call func(arg, dirname, fnames). - dirname is the name of the directory, and fnames a list of the names of - the files and subdirectories in dirname (excluding '.' and '..'). func - may modify the fnames list in-place (e.g. via del or slice assignment), - and walk will only recurse into the subdirectories whose names remain in - fnames; this can be used to implement a filter, or to impose a specific - order of visiting. No semantics are defined for, or required of, arg, - beyond that arg is always passed to func. It can be used, e.g., to pass - a filename pattern, or a mutable object designed to accumulate - statistics. Passing None for arg is common.""" - warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.", - stacklevel=2) - try: - names = os.listdir(top) - except os.error: - return - func(arg, top, names) - for name in names: - name = join(top, name) - try: - st = os.lstat(name) - except os.error: - continue - if stat.S_ISDIR(st.st_mode): - walk(name, func, arg) - - -# Expand paths beginning with '~' or '~user'. -# '~' means $HOME; '~user' means that user's home directory. 
-# If the path doesn't begin with '~', or if the user or $HOME is unknown, -# the path is returned unchanged (leaving error reporting to whatever -# function is called with the expanded path as argument). -# See also module 'glob' for expansion of *, ? and [...] in pathnames. -# (A function should also be defined to do full *sh-style environment -# variable expansion.) - -def expanduser(path): - """Expand ~ and ~user constructions. If user or $HOME is unknown, - do nothing.""" - if not path.startswith('~'): - return path - i = path.find('/', 1) - if i < 0: - i = len(path) - if i == 1: - if 'HOME' not in os.environ: - import pwd - userhome = pwd.getpwuid(os.getuid()).pw_dir - else: - userhome = os.environ['HOME'] - else: - import pwd - try: - pwent = pwd.getpwnam(path[1:i]) - except KeyError: - return path - userhome = pwent.pw_dir - userhome = userhome.rstrip('/') or userhome - return userhome + path[i:] - - -# Expand paths containing shell variable substitutions. -# This expands the forms $variable and ${variable} only. -# Non-existent variables are left unchanged. - -_varprog = None - -def expandvars(path): - """Expand shell variables of form $var and ${var}. Unknown variables - are left unchanged.""" - global _varprog - if '$' not in path: - return path - if not _varprog: - import re - _varprog = re.compile(r'\$(\w+|\{[^}]*\})') - i = 0 - while True: - m = _varprog.search(path, i) - if not m: - break - i, j = m.span(0) - name = m.group(1) - if name.startswith('{') and name.endswith('}'): - name = name[1:-1] - if name in os.environ: - tail = path[j:] - path = path[:i] + os.environ[name] - i = len(path) - path += tail - else: - i = j - return path - - -# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B. -# It should be understood that this may change the meaning of the path -# if it contains symbolic links! - -def normpath(path): - """Normalize path, eliminating double slashes, etc.""" - if path == '': - return '.' - initial_slashes = path.startswith('/') - # POSIX allows one or two initial slashes, but treats three or more - # as single slash. - if (initial_slashes and - path.startswith('//') and not path.startswith('///')): - initial_slashes = 2 - comps = path.split('/') - new_comps = [] - for comp in comps: - if comp in ('', '.'): - continue - if (comp != '..' or (not initial_slashes and not new_comps) or - (new_comps and new_comps[-1] == '..')): - new_comps.append(comp) - elif new_comps: - new_comps.pop() - comps = new_comps - path = '/'.join(comps) - if initial_slashes: - path = '/'*initial_slashes + path - return path or '.' - - -def abspath(path): - """Return an absolute path.""" - if not isabs(path): - path = join(os.getcwd(), path) - return normpath(path) - - -# Return a canonical path (i.e. the absolute location of a file on the -# filesystem). - -def realpath(filename): - """Return the canonical path of the specified filename, eliminating any -symbolic links encountered in the path.""" - if isabs(filename): - bits = ['/'] + filename.split('/')[1:] - else: - bits = [''] + filename.split('/') - - for i in range(2, len(bits)+1): - component = join(*bits[0:i]) - # Resolve symbolic links. - if islink(component): - resolved = _resolve_link(component) - if resolved is None: - # Infinite loop -- return original component + rest of the path - return abspath(join(*([component] + bits[i:]))) - else: - newpath = join(*([resolved] + bits[i:])) - return realpath(newpath) - - return abspath(filename) - - -def _resolve_link(path): - """Internal helper function. 
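expandvars and normpath above are likewise purely textual, which is exactly why realpath has to resolve symlinks separately via _resolve_link. A sketch (MINER_HOME is an arbitrary example variable):

# Sketch: expandvars/normpath do string work only; no filesystem access.
import os
import posixpath

os.environ["MINER_HOME"] = "/root"
print posixpath.expandvars("$MINER_HOME/scripts")  # '/root/scripts'
print posixpath.normpath("//root/./scripts/x/..")  # '//root/scripts'
print posixpath.normpath("a/../../b")              # '../b'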
Takes a path and follows symlinks - until we either arrive at something that isn't a symlink, or - encounter a path we've seen before (meaning that there's a loop). - """ - paths_seen = [] - while islink(path): - if path in paths_seen: - # Already seen this path, so we must have a symlink loop - return None - paths_seen.append(path) - # Resolve where the link points to - resolved = os.readlink(path) - if not isabs(resolved): - dir = dirname(path) - path = normpath(join(dir, resolved)) - else: - path = normpath(resolved) - return path - -supports_unicode_filenames = False - -def relpath(path, start=curdir): - """Return a relative version of a path""" - - if not path: - raise ValueError("no path specified") - - start_list = abspath(start).split(sep) - path_list = abspath(path).split(sep) - - # Work out how much of the filepath is shared by start and path. - i = len(commonprefix([start_list, path_list])) - - rel_list = [pardir] * (len(start_list)-i) + path_list[i:] - if not rel_list: - return curdir - return join(*rel_list) diff --git a/icarus-miner/data/usr/lib/python2.6/posixpath.pyc b/icarus-miner/data/usr/lib/python2.6/posixpath.pyc deleted file mode 100644 index 2883b7d..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/posixpath.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/re.py b/icarus-miner/data/usr/lib/python2.6/re.py deleted file mode 100644 index 2cf7132..0000000 --- a/icarus-miner/data/usr/lib/python2.6/re.py +++ /dev/null @@ -1,327 +0,0 @@ -# -# Secret Labs' Regular Expression Engine -# -# re-compatible interface for the sre matching engine -# -# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. -# -# This version of the SRE library can be redistributed under CNRI's -# Python 1.6 license. For any other use, please contact Secret Labs -# AB (info@pythonware.com). -# -# Portions of this engine have been developed in cooperation with -# CNRI. Hewlett-Packard provided funding for 1.6 integration and -# other compatibility work. -# - -r"""Support for regular expressions (RE). - -This module provides regular expression matching operations similar to -those found in Perl. It supports both 8-bit and Unicode strings; both -the pattern and the strings being processed can contain null bytes and -characters outside the US ASCII range. - -Regular expressions can contain both special and ordinary characters. -Most ordinary characters, like "A", "a", or "0", are the simplest -regular expressions; they simply match themselves. You can -concatenate ordinary characters, so last matches the string 'last'. - -The special characters are: - "." Matches any character except a newline. - "^" Matches the start of the string. - "$" Matches the end of the string or just before the newline at - the end of the string. - "*" Matches 0 or more (greedy) repetitions of the preceding RE. - Greedy means that it will match as many repetitions as possible. - "+" Matches 1 or more (greedy) repetitions of the preceding RE. - "?" Matches 0 or 1 (greedy) of the preceding RE. - *?,+?,?? Non-greedy versions of the previous three special characters. - {m,n} Matches from m to n repetitions of the preceding RE. - {m,n}? Non-greedy version of the above. - "\\" Either escapes special characters or signals a special sequence. - [] Indicates a set of characters. - A "^" as the first character indicates a complementing set. - "|" A|B, creates an RE that will match either A or B. - (...) Matches the RE inside the parentheses. 
- The contents can be retrieved or matched later in the string. - (?iLmsux) Set the I, L, M, S, U, or X flag for the RE (see below). - (?:...) Non-grouping version of regular parentheses. - (?P...) The substring matched by the group is accessible by name. - (?P=name) Matches the text matched earlier by the group named name. - (?#...) A comment; ignored. - (?=...) Matches if ... matches next, but doesn't consume the string. - (?!...) Matches if ... doesn't match next. - (?<=...) Matches if preceded by ... (must be fixed length). - (?= 0x02020000: - __all__.append("finditer") - def finditer(pattern, string, flags=0): - """Return an iterator over all non-overlapping matches in the - string. For each match, the iterator returns a match object. - - Empty matches are included in the result.""" - return _compile(pattern, flags).finditer(string) - -def compile(pattern, flags=0): - "Compile a regular expression pattern, returning a pattern object." - return _compile(pattern, flags) - -def purge(): - "Clear the regular expression cache" - _cache.clear() - _cache_repl.clear() - -def template(pattern, flags=0): - "Compile a template pattern, returning a pattern object" - return _compile(pattern, flags|T) - -_alphanum = {} -for c in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890': - _alphanum[c] = 1 -del c - -def escape(pattern): - "Escape all non-alphanumeric characters in pattern." - s = list(pattern) - alphanum = _alphanum - for i in range(len(pattern)): - c = pattern[i] - if c not in alphanum: - if c == "\000": - s[i] = "\\000" - else: - s[i] = "\\" + c - return pattern[:0].join(s) - -# -------------------------------------------------------------------- -# internals - -_cache = {} -_cache_repl = {} - -_pattern_type = type(sre_compile.compile("", 0)) - -_MAXCACHE = 100 - -def _compile(*key): - # internal: compile pattern - cachekey = (type(key[0]),) + key - p = _cache.get(cachekey) - if p is not None: - return p - pattern, flags = key - if isinstance(pattern, _pattern_type): - if flags: - raise ValueError('Cannot process flags argument with a compiled pattern') - return pattern - if not sre_compile.isstring(pattern): - raise TypeError, "first argument must be string or compiled pattern" - try: - p = sre_compile.compile(pattern, flags) - except error, v: - raise error, v # invalid expression - if len(_cache) >= _MAXCACHE: - _cache.clear() - _cache[cachekey] = p - return p - -def _compile_repl(*key): - # internal: compile replacement pattern - p = _cache_repl.get(key) - if p is not None: - return p - repl, pattern = key - try: - p = sre_parse.parse_template(repl, pattern) - except error, v: - raise error, v # invalid expression - if len(_cache_repl) >= _MAXCACHE: - _cache_repl.clear() - _cache_repl[key] = p - return p - -def _expand(pattern, match, template): - # internal: match.expand implementation hook - template = sre_parse.parse_template(template, pattern) - return sre_parse.expand_template(template, match) - -def _subx(pattern, template): - # internal: pattern.sub/subn implementation helper - template = _compile_repl(template, pattern) - if not template[0] and len(template[1]) == 1: - # literal replacement - return template[1][0] - def filter(match, template=template): - return sre_parse.expand_template(template, match) - return filter - -# register myself for pickling - -import copy_reg - -def _pickle(p): - return _compile, (p.pattern, p.flags) - -copy_reg.pickle(_pattern_type, _pickle, _compile) - -# -------------------------------------------------------------------- -# 
experimental stuff (see python-dev discussions for details) - -class Scanner: - def __init__(self, lexicon, flags=0): - from sre_constants import BRANCH, SUBPATTERN - self.lexicon = lexicon - # combine phrases into a compound pattern - p = [] - s = sre_parse.Pattern() - s.flags = flags - for phrase, action in lexicon: - p.append(sre_parse.SubPattern(s, [ - (SUBPATTERN, (len(p)+1, sre_parse.parse(phrase, flags))), - ])) - s.groups = len(p)+1 - p = sre_parse.SubPattern(s, [(BRANCH, (None, p))]) - self.scanner = sre_compile.compile(p) - def scan(self, string): - result = [] - append = result.append - match = self.scanner.scanner(string).match - i = 0 - while 1: - m = match() - if not m: - break - j = m.end() - if i == j: - break - action = self.lexicon[m.lastindex-1][1] - if hasattr(action, '__call__'): - self.match = m - action = action(self, m.group()) - if action is not None: - append(action) - i = j - return result, string[i:] diff --git a/icarus-miner/data/usr/lib/python2.6/re.pyc b/icarus-miner/data/usr/lib/python2.6/re.pyc deleted file mode 100644 index f50fa56..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/re.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/rfc822.py b/icarus-miner/data/usr/lib/python2.6/rfc822.py deleted file mode 100644 index 64cd702..0000000 --- a/icarus-miner/data/usr/lib/python2.6/rfc822.py +++ /dev/null @@ -1,1011 +0,0 @@ -"""RFC 2822 message manipulation. - -Note: This is only a very rough sketch of a full RFC-822 parser; in particular -the tokenizing of addresses does not adhere to all the quoting rules. - -Note: RFC 2822 is a long awaited update to RFC 822. This module should -conform to RFC 2822, and is thus mis-named (it's not worth renaming it). Some -effort at RFC 2822 updates have been made, but a thorough audit has not been -performed. Consider any RFC 2822 non-conformance to be a bug. - - RFC 2822: http://www.faqs.org/rfcs/rfc2822.html - RFC 822 : http://www.faqs.org/rfcs/rfc822.html (obsolete) - -Directions for use: - -To create a Message object: first open a file, e.g.: - - fp = open(file, 'r') - -You can use any other legal way of getting an open file object, e.g. use -sys.stdin or call os.popen(). Then pass the open file object to the Message() -constructor: - - m = Message(fp) - -This class can work with any input object that supports a readline method. If -the input object has seek and tell capability, the rewindbody method will -work; also illegal lines will be pushed back onto the input stream. If the -input object lacks seek but has an `unread' method that can push back a line -of input, Message will use that to push back illegal lines. Thus this class -can be used to parse messages coming from a buffered stream. - -The optional `seekable' argument is provided as a workaround for certain stdio -libraries in which tell() discards buffered data before discovering that the -lseek() system call doesn't work. For maximum portability, you should set the -seekable argument to zero to prevent that initial \code{tell} when passing in -an unseekable object such as a a file object created from a socket object. If -it is 1 on entry -- which it is by default -- the tell() method of the open -file object is called once; if this raises an exception, seekable is reset to -0. For other nonzero values of seekable, this test is not made. - -To get the text of a particular header there are several methods: - - str = m.getheader(name) - str = m.getrawheader(name) - -where name is the name of the header, e.g. 'Subject'. 
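A hedged sketch of the usage the notes above describe, with StringIO standing in for an open file (headers are made-up examples):

# Sketch: parsing a header block with rfc822.Message.
from StringIO import StringIO
import rfc822

fp = StringIO("From: Guido van Rossum <guido@cwi.nl>\n"
              "Subject:  test \n"
              "\n"
              "body\n")
m = rfc822.Message(fp)
print m.getheader("subject")     # 'test' (whitespace stripped)
print m.getrawheader("Subject")  # '  test \n' (whitespace kept)
print m.getaddr("from")          # ('Guido van Rossum', 'guido@cwi.nl')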
The difference is that -getheader() strips the leading and trailing whitespace, while getrawheader() -doesn't. Both functions retain embedded whitespace (including newlines) -exactly as they are specified in the header, and leave the case of the text -unchanged. - -For addresses and address lists there are functions - - realname, mailaddress = m.getaddr(name) - list = m.getaddrlist(name) - -where the latter returns a list of (realname, mailaddr) tuples. - -There is also a method - - time = m.getdate(name) - -which parses a Date-like field and returns a time-compatible tuple, -i.e. a tuple such as returned by time.localtime() or accepted by -time.mktime(). - -See the class definition for lower level access methods. - -There are also some utility functions here. -""" -# Cleanup and extensions by Eric S. Raymond - -import time - -from warnings import warnpy3k -warnpy3k("in 3.x, rfc822 has been removed in favor of the email package", - stacklevel=2) - -__all__ = ["Message","AddressList","parsedate","parsedate_tz","mktime_tz"] - -_blanklines = ('\r\n', '\n') # Optimization for islast() - - -class Message: - """Represents a single RFC 2822-compliant message.""" - - def __init__(self, fp, seekable = 1): - """Initialize the class instance and read the headers.""" - if seekable == 1: - # Exercise tell() to make sure it works - # (and then assume seek() works, too) - try: - fp.tell() - except (AttributeError, IOError): - seekable = 0 - self.fp = fp - self.seekable = seekable - self.startofheaders = None - self.startofbody = None - # - if self.seekable: - try: - self.startofheaders = self.fp.tell() - except IOError: - self.seekable = 0 - # - self.readheaders() - # - if self.seekable: - try: - self.startofbody = self.fp.tell() - except IOError: - self.seekable = 0 - - def rewindbody(self): - """Rewind the file to the start of the body (if seekable).""" - if not self.seekable: - raise IOError, "unseekable file" - self.fp.seek(self.startofbody) - - def readheaders(self): - """Read header lines. - - Read header lines up to the entirely blank line that terminates them. - The (normally blank) line that ends the headers is skipped, but not - included in the returned list. If a non-header line ends the headers, - (which is an error), an attempt is made to backspace over it; it is - never included in the returned list. - - The variable self.status is set to the empty string if all went well, - otherwise it is an error message. The variable self.headers is a - completely uninterpreted list of lines contained in the header (so - printing them will reproduce the header exactly as it appears in the - file). - """ - self.dict = {} - self.unixfrom = '' - self.headers = lst = [] - self.status = '' - headerseen = "" - firstline = 1 - startofline = unread = tell = None - if hasattr(self.fp, 'unread'): - unread = self.fp.unread - elif self.seekable: - tell = self.fp.tell - while 1: - if tell: - try: - startofline = tell() - except IOError: - startofline = tell = None - self.seekable = 0 - line = self.fp.readline() - if not line: - self.status = 'EOF in headers' - break - # Skip unix From name time lines - if firstline and line.startswith('From '): - self.unixfrom = self.unixfrom + line - continue - firstline = 0 - if headerseen and line[0] in ' \t': - # It's a continuation line. - lst.append(line) - x = (self.dict[headerseen] + "\n " + line.strip()) - self.dict[headerseen] = x.strip() - continue - elif self.iscomment(line): - # It's a comment. Ignore it. - continue - elif self.islast(line): - # Note! No pushback here! 
The delimiter line gets eaten. - break - headerseen = self.isheader(line) - if headerseen: - # It's a legal header line, save it. - lst.append(line) - self.dict[headerseen] = line[len(headerseen)+1:].strip() - continue - else: - # It's not a header line; throw it back and stop here. - if not self.dict: - self.status = 'No headers' - else: - self.status = 'Non-header line where header expected' - # Try to undo the read. - if unread: - unread(line) - elif tell: - self.fp.seek(startofline) - else: - self.status = self.status + '; bad seek' - break - - def isheader(self, line): - """Determine whether a given line is a legal header. - - This method should return the header name, suitably canonicalized. - You may override this method in order to use Message parsing on tagged - data in RFC 2822-like formats with special header formats. - """ - i = line.find(':') - if i > 0: - return line[:i].lower() - return None - - def islast(self, line): - """Determine whether a line is a legal end of RFC 2822 headers. - - You may override this method if your application wants to bend the - rules, e.g. to strip trailing whitespace, or to recognize MH template - separators ('--------'). For convenience (e.g. for code reading from - sockets) a line consisting of \r\n also matches. - """ - return line in _blanklines - - def iscomment(self, line): - """Determine whether a line should be skipped entirely. - - You may override this method in order to use Message parsing on tagged - data in RFC 2822-like formats that support embedded comments or - free-text data. - """ - return False - - def getallmatchingheaders(self, name): - """Find all header lines matching a given header name. - - Look through the list of headers and find all lines matching a given - header name (and their continuation lines). A list of the lines is - returned, without interpretation. If the header does not occur, an - empty list is returned. If the header occurs multiple times, all - occurrences are returned. Case is not important in the header name. - """ - name = name.lower() + ':' - n = len(name) - lst = [] - hit = 0 - for line in self.headers: - if line[:n].lower() == name: - hit = 1 - elif not line[:1].isspace(): - hit = 0 - if hit: - lst.append(line) - return lst - - def getfirstmatchingheader(self, name): - """Get the first header line matching name. - - This is similar to getallmatchingheaders, but it returns only the - first matching header (and its continuation lines). - """ - name = name.lower() + ':' - n = len(name) - lst = [] - hit = 0 - for line in self.headers: - if hit: - if not line[:1].isspace(): - break - elif line[:n].lower() == name: - hit = 1 - if hit: - lst.append(line) - return lst - - def getrawheader(self, name): - """A higher-level interface to getfirstmatchingheader(). - - Return a string containing the literal text of the header but with the - keyword stripped. All leading, trailing and embedded whitespace is - kept in the string, however. Return None if the header does not - occur. - """ - - lst = self.getfirstmatchingheader(name) - if not lst: - return None - lst[0] = lst[0][len(name) + 1:] - return ''.join(lst) - - def getheader(self, name, default=None): - """Get the header value for a name. - - This is the normal interface: it returns a stripped version of the - header value for a given header name, or None if it doesn't exist. - This uses the dictionary version which finds the *last* such header. 
- """ - return self.dict.get(name.lower(), default) - get = getheader - - def getheaders(self, name): - """Get all values for a header. - - This returns a list of values for headers given more than once; each - value in the result list is stripped in the same way as the result of - getheader(). If the header is not given, return an empty list. - """ - result = [] - current = '' - have_header = 0 - for s in self.getallmatchingheaders(name): - if s[0].isspace(): - if current: - current = "%s\n %s" % (current, s.strip()) - else: - current = s.strip() - else: - if have_header: - result.append(current) - current = s[s.find(":") + 1:].strip() - have_header = 1 - if have_header: - result.append(current) - return result - - def getaddr(self, name): - """Get a single address from a header, as a tuple. - - An example return value: - ('Guido van Rossum', 'guido@cwi.nl') - """ - # New, by Ben Escoto - alist = self.getaddrlist(name) - if alist: - return alist[0] - else: - return (None, None) - - def getaddrlist(self, name): - """Get a list of addresses from a header. - - Retrieves a list of addresses from a header, where each address is a - tuple as returned by getaddr(). Scans all named headers, so it works - properly with multiple To: or Cc: headers for example. - """ - raw = [] - for h in self.getallmatchingheaders(name): - if h[0] in ' \t': - raw.append(h) - else: - if raw: - raw.append(', ') - i = h.find(':') - if i > 0: - addr = h[i+1:] - raw.append(addr) - alladdrs = ''.join(raw) - a = AddressList(alladdrs) - return a.addresslist - - def getdate(self, name): - """Retrieve a date field from a header. - - Retrieves a date field from the named header, returning a tuple - compatible with time.mktime(). - """ - try: - data = self[name] - except KeyError: - return None - return parsedate(data) - - def getdate_tz(self, name): - """Retrieve a date field from a header as a 10-tuple. - - The first 9 elements make up a tuple compatible with time.mktime(), - and the 10th is the offset of the poster's time zone from GMT/UTC. - """ - try: - data = self[name] - except KeyError: - return None - return parsedate_tz(data) - - - # Access as a dictionary (only finds *last* header of each type): - - def __len__(self): - """Get the number of headers in a message.""" - return len(self.dict) - - def __getitem__(self, name): - """Get a specific header, as from a dictionary.""" - return self.dict[name.lower()] - - def __setitem__(self, name, value): - """Set the value of a header. - - Note: This is not a perfect inversion of __getitem__, because any - changed headers get stuck at the end of the raw-headers list rather - than where the altered header was. 
- """ - del self[name] # Won't fail if it doesn't exist - self.dict[name.lower()] = value - text = name + ": " + value - for line in text.split("\n"): - self.headers.append(line + "\n") - - def __delitem__(self, name): - """Delete all occurrences of a specific header, if it is present.""" - name = name.lower() - if not name in self.dict: - return - del self.dict[name] - name = name + ':' - n = len(name) - lst = [] - hit = 0 - for i in range(len(self.headers)): - line = self.headers[i] - if line[:n].lower() == name: - hit = 1 - elif not line[:1].isspace(): - hit = 0 - if hit: - lst.append(i) - for i in reversed(lst): - del self.headers[i] - - def setdefault(self, name, default=""): - lowername = name.lower() - if lowername in self.dict: - return self.dict[lowername] - else: - text = name + ": " + default - for line in text.split("\n"): - self.headers.append(line + "\n") - self.dict[lowername] = default - return default - - def has_key(self, name): - """Determine whether a message contains the named header.""" - return name.lower() in self.dict - - def __contains__(self, name): - """Determine whether a message contains the named header.""" - return name.lower() in self.dict - - def __iter__(self): - return iter(self.dict) - - def keys(self): - """Get all of a message's header field names.""" - return self.dict.keys() - - def values(self): - """Get all of a message's header field values.""" - return self.dict.values() - - def items(self): - """Get all of a message's headers. - - Returns a list of name, value tuples. - """ - return self.dict.items() - - def __str__(self): - return ''.join(self.headers) - - -# Utility functions -# ----------------- - -# XXX Should fix unquote() and quote() to be really conformant. -# XXX The inverses of the parse functions may also be useful. - - -def unquote(s): - """Remove quotes from a string.""" - if len(s) > 1: - if s.startswith('"') and s.endswith('"'): - return s[1:-1].replace('\\\\', '\\').replace('\\"', '"') - if s.startswith('<') and s.endswith('>'): - return s[1:-1] - return s - - -def quote(s): - """Add quotes around a string.""" - return s.replace('\\', '\\\\').replace('"', '\\"') - - -def parseaddr(address): - """Parse an address into a (realname, mailaddr) tuple.""" - a = AddressList(address) - lst = a.addresslist - if not lst: - return (None, None) - return lst[0] - - -class AddrlistClass: - """Address parser class by Ben Escoto. - - To understand what this class does, it helps to have a copy of - RFC 2822 in front of you. - - http://www.faqs.org/rfcs/rfc2822.html - - Note: this class interface is deprecated and may be removed in the future. - Use rfc822.AddressList instead. - """ - - def __init__(self, field): - """Initialize a new instance. - - `field' is an unparsed address header field, containing one or more - addresses. - """ - self.specials = '()<>@,:;.\"[]' - self.pos = 0 - self.LWS = ' \t' - self.CR = '\r\n' - self.atomends = self.specials + self.LWS + self.CR - # Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it - # is obsolete syntax. RFC 2822 requires that we recognize obsolete - # syntax, so allow dots in phrases. 
- self.phraseends = self.atomends.replace('.', '') - self.field = field - self.commentlist = [] - - def gotonext(self): - """Parse up to the start of the next address.""" - while self.pos < len(self.field): - if self.field[self.pos] in self.LWS + '\n\r': - self.pos = self.pos + 1 - elif self.field[self.pos] == '(': - self.commentlist.append(self.getcomment()) - else: break - - def getaddrlist(self): - """Parse all addresses. - - Returns a list containing all of the addresses. - """ - result = [] - ad = self.getaddress() - while ad: - result += ad - ad = self.getaddress() - return result - - def getaddress(self): - """Parse the next address.""" - self.commentlist = [] - self.gotonext() - - oldpos = self.pos - oldcl = self.commentlist - plist = self.getphraselist() - - self.gotonext() - returnlist = [] - - if self.pos >= len(self.field): - # Bad email address technically, no domain. - if plist: - returnlist = [(' '.join(self.commentlist), plist[0])] - - elif self.field[self.pos] in '.@': - # email address is just an addrspec - # this isn't very efficient since we start over - self.pos = oldpos - self.commentlist = oldcl - addrspec = self.getaddrspec() - returnlist = [(' '.join(self.commentlist), addrspec)] - - elif self.field[self.pos] == ':': - # address is a group - returnlist = [] - - fieldlen = len(self.field) - self.pos += 1 - while self.pos < len(self.field): - self.gotonext() - if self.pos < fieldlen and self.field[self.pos] == ';': - self.pos += 1 - break - returnlist = returnlist + self.getaddress() - - elif self.field[self.pos] == '<': - # Address is a phrase then a route addr - routeaddr = self.getrouteaddr() - - if self.commentlist: - returnlist = [(' '.join(plist) + ' (' + \ - ' '.join(self.commentlist) + ')', routeaddr)] - else: returnlist = [(' '.join(plist), routeaddr)] - - else: - if plist: - returnlist = [(' '.join(self.commentlist), plist[0])] - elif self.field[self.pos] in self.specials: - self.pos += 1 - - self.gotonext() - if self.pos < len(self.field) and self.field[self.pos] == ',': - self.pos += 1 - return returnlist - - def getrouteaddr(self): - """Parse a route address (Return-path value). - - This method just skips all the route stuff and returns the addrspec. 
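Taken together, the parser methods above are what let AddressList turn a raw header value into (realname, address) pairs, with comments doubling as real names when no phrase is present. A sketch with made-up addresses:

# Sketch: AddressList parsing of a multi-address header.
from rfc822 import AddressList

al = AddressList("Alice Example <alice@example.org>, bob@example.com (Bob)")
print al.addresslist
# -> [('Alice Example', 'alice@example.org'), ('Bob', 'bob@example.com')]
print len(al), al[0]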
- """ - if self.field[self.pos] != '<': - return - - expectroute = 0 - self.pos += 1 - self.gotonext() - adlist = "" - while self.pos < len(self.field): - if expectroute: - self.getdomain() - expectroute = 0 - elif self.field[self.pos] == '>': - self.pos += 1 - break - elif self.field[self.pos] == '@': - self.pos += 1 - expectroute = 1 - elif self.field[self.pos] == ':': - self.pos += 1 - else: - adlist = self.getaddrspec() - self.pos += 1 - break - self.gotonext() - - return adlist - - def getaddrspec(self): - """Parse an RFC 2822 addr-spec.""" - aslist = [] - - self.gotonext() - while self.pos < len(self.field): - if self.field[self.pos] == '.': - aslist.append('.') - self.pos += 1 - elif self.field[self.pos] == '"': - aslist.append('"%s"' % self.getquote()) - elif self.field[self.pos] in self.atomends: - break - else: aslist.append(self.getatom()) - self.gotonext() - - if self.pos >= len(self.field) or self.field[self.pos] != '@': - return ''.join(aslist) - - aslist.append('@') - self.pos += 1 - self.gotonext() - return ''.join(aslist) + self.getdomain() - - def getdomain(self): - """Get the complete domain name from an address.""" - sdlist = [] - while self.pos < len(self.field): - if self.field[self.pos] in self.LWS: - self.pos += 1 - elif self.field[self.pos] == '(': - self.commentlist.append(self.getcomment()) - elif self.field[self.pos] == '[': - sdlist.append(self.getdomainliteral()) - elif self.field[self.pos] == '.': - self.pos += 1 - sdlist.append('.') - elif self.field[self.pos] in self.atomends: - break - else: sdlist.append(self.getatom()) - return ''.join(sdlist) - - def getdelimited(self, beginchar, endchars, allowcomments = 1): - """Parse a header fragment delimited by special characters. - - `beginchar' is the start character for the fragment. If self is not - looking at an instance of `beginchar' then getdelimited returns the - empty string. - - `endchars' is a sequence of allowable end-delimiting characters. - Parsing stops when one of these is encountered. - - If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed - within the parsed fragment. - """ - if self.field[self.pos] != beginchar: - return '' - - slist = [''] - quote = 0 - self.pos += 1 - while self.pos < len(self.field): - if quote == 1: - slist.append(self.field[self.pos]) - quote = 0 - elif self.field[self.pos] in endchars: - self.pos += 1 - break - elif allowcomments and self.field[self.pos] == '(': - slist.append(self.getcomment()) - continue # have already advanced pos from getcomment - elif self.field[self.pos] == '\\': - quote = 1 - else: - slist.append(self.field[self.pos]) - self.pos += 1 - - return ''.join(slist) - - def getquote(self): - """Get a quote-delimited fragment from self's field.""" - return self.getdelimited('"', '"\r', 0) - - def getcomment(self): - """Get a parenthesis-delimited fragment from self's field.""" - return self.getdelimited('(', ')\r', 1) - - def getdomainliteral(self): - """Parse an RFC 2822 domain-literal.""" - return '[%s]' % self.getdelimited('[', ']\r', 0) - - def getatom(self, atomends=None): - """Parse an RFC 2822 atom. - - Optional atomends specifies a different set of end token delimiters - (the default is to use self.atomends). This is used e.g. in - getphraselist() since phrase endings must not include the `.' 
(which - is legal in phrases).""" - atomlist = [''] - if atomends is None: - atomends = self.atomends - - while self.pos < len(self.field): - if self.field[self.pos] in atomends: - break - else: atomlist.append(self.field[self.pos]) - self.pos += 1 - - return ''.join(atomlist) - - def getphraselist(self): - """Parse a sequence of RFC 2822 phrases. - - A phrase is a sequence of words, which are in turn either RFC 2822 - atoms or quoted-strings. Phrases are canonicalized by squeezing all - runs of continuous whitespace into one space. - """ - plist = [] - - while self.pos < len(self.field): - if self.field[self.pos] in self.LWS: - self.pos += 1 - elif self.field[self.pos] == '"': - plist.append(self.getquote()) - elif self.field[self.pos] == '(': - self.commentlist.append(self.getcomment()) - elif self.field[self.pos] in self.phraseends: - break - else: - plist.append(self.getatom(self.phraseends)) - - return plist - -class AddressList(AddrlistClass): - """An AddressList encapsulates a list of parsed RFC 2822 addresses.""" - def __init__(self, field): - AddrlistClass.__init__(self, field) - if field: - self.addresslist = self.getaddrlist() - else: - self.addresslist = [] - - def __len__(self): - return len(self.addresslist) - - def __str__(self): - return ", ".join(map(dump_address_pair, self.addresslist)) - - def __add__(self, other): - # Set union - newaddr = AddressList(None) - newaddr.addresslist = self.addresslist[:] - for x in other.addresslist: - if not x in self.addresslist: - newaddr.addresslist.append(x) - return newaddr - - def __iadd__(self, other): - # Set union, in-place - for x in other.addresslist: - if not x in self.addresslist: - self.addresslist.append(x) - return self - - def __sub__(self, other): - # Set difference - newaddr = AddressList(None) - for x in self.addresslist: - if not x in other.addresslist: - newaddr.addresslist.append(x) - return newaddr - - def __isub__(self, other): - # Set difference, in-place - for x in other.addresslist: - if x in self.addresslist: - self.addresslist.remove(x) - return self - - def __getitem__(self, index): - # Make indexing, slices, and 'in' work - return self.addresslist[index] - -def dump_address_pair(pair): - """Dump a (name, address) pair in a canonicalized form.""" - if pair[0]: - return '"' + pair[0] + '" <' + pair[1] + '>' - else: - return pair[1] - -# Parse a date field - -_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', - 'aug', 'sep', 'oct', 'nov', 'dec', - 'january', 'february', 'march', 'april', 'may', 'june', 'july', - 'august', 'september', 'october', 'november', 'december'] -_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'] - -# The timezone table does not include the military time zones defined -# in RFC822, other than Z. According to RFC1123, the description in -# RFC822 gets the signs wrong, so we can't rely on any such time -# zones. RFC1123 recommends that numeric timezone indicators be used -# instead of timezone names. - -_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0, - 'AST': -400, 'ADT': -300, # Atlantic (used in Canada) - 'EST': -500, 'EDT': -400, # Eastern - 'CST': -600, 'CDT': -500, # Central - 'MST': -700, 'MDT': -600, # Mountain - 'PST': -800, 'PDT': -700 # Pacific - } - - -def parsedate_tz(data): - """Convert a date string to a time tuple. - - Accounts for military timezones. - """ - if not data: - return None - data = data.split() - if data[0][-1] in (',', '.') or data[0].lower() in _daynames: - # There's a dayname here. 
Skip it - del data[0] - else: - # no space after the "weekday,"? - i = data[0].rfind(',') - if i >= 0: - data[0] = data[0][i+1:] - if len(data) == 3: # RFC 850 date, deprecated - stuff = data[0].split('-') - if len(stuff) == 3: - data = stuff + data[1:] - if len(data) == 4: - s = data[3] - i = s.find('+') - if i > 0: - data[3:] = [s[:i], s[i+1:]] - else: - data.append('') # Dummy tz - if len(data) < 5: - return None - data = data[:5] - [dd, mm, yy, tm, tz] = data - mm = mm.lower() - if not mm in _monthnames: - dd, mm = mm, dd.lower() - if not mm in _monthnames: - return None - mm = _monthnames.index(mm)+1 - if mm > 12: mm = mm - 12 - if dd[-1] == ',': - dd = dd[:-1] - i = yy.find(':') - if i > 0: - yy, tm = tm, yy - if yy[-1] == ',': - yy = yy[:-1] - if not yy[0].isdigit(): - yy, tz = tz, yy - if tm[-1] == ',': - tm = tm[:-1] - tm = tm.split(':') - if len(tm) == 2: - [thh, tmm] = tm - tss = '0' - elif len(tm) == 3: - [thh, tmm, tss] = tm - else: - return None - try: - yy = int(yy) - dd = int(dd) - thh = int(thh) - tmm = int(tmm) - tss = int(tss) - except ValueError: - return None - tzoffset = None - tz = tz.upper() - if tz in _timezones: - tzoffset = _timezones[tz] - else: - try: - tzoffset = int(tz) - except ValueError: - pass - # Convert a timezone offset into seconds ; -0500 -> -18000 - if tzoffset: - if tzoffset < 0: - tzsign = -1 - tzoffset = -tzoffset - else: - tzsign = 1 - tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60) - return (yy, mm, dd, thh, tmm, tss, 0, 1, 0, tzoffset) - - -def parsedate(data): - """Convert a time string to a time tuple.""" - t = parsedate_tz(data) - if t is None: - return t - return t[:9] - - -def mktime_tz(data): - """Turn a 10-tuple as returned by parsedate_tz() into a UTC timestamp.""" - if data[9] is None: - # No zone info, so localtime is better assumption than GMT - return time.mktime(data[:8] + (-1,)) - else: - t = time.mktime(data[:8] + (0,)) - return t - data[9] - time.timezone - -def formatdate(timeval=None): - """Returns time format preferred for Internet standards. - - Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123 - - According to RFC 1123, day and month names must always be in - English. If not for that, this code could use strftime(). It - can't because strftime() honors the locale and could generated - non-English names. - """ - if timeval is None: - timeval = time.time() - timeval = time.gmtime(timeval) - return "%s, %02d %s %04d %02d:%02d:%02d GMT" % ( - ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")[timeval[6]], - timeval[2], - ("Jan", "Feb", "Mar", "Apr", "May", "Jun", - "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")[timeval[1]-1], - timeval[0], timeval[3], timeval[4], timeval[5]) - - -# When used as script, run a small test program. -# The first command line argument must be a filename containing one -# message in RFC-822 format. 
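The tuple layout above matters: elements 0-8 are time.mktime() compatible and element 9 is the zone offset in seconds, which is how mktime_tz recovers a UTC timestamp. A sketch:

# Sketch: parsedate_tz/mktime_tz round trip for an RFC 2822 date.
from rfc822 import parsedate_tz, mktime_tz

t = parsedate_tz("Sun, 06 Nov 1994 08:49:37 -0500")
print t[:6]         # (1994, 11, 6, 8, 49, 37)
print t[9]          # -18000, the zone offset in seconds
print mktime_tz(t)  # 784129777.0, seconds since the epoch (UTC)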
- -if __name__ == '__main__': - import sys, os - file = os.path.join(os.environ['HOME'], 'Mail/inbox/1') - if sys.argv[1:]: file = sys.argv[1] - f = open(file, 'r') - m = Message(f) - print 'From:', m.getaddr('from') - print 'To:', m.getaddrlist('to') - print 'Subject:', m.getheader('subject') - print 'Date:', m.getheader('date') - date = m.getdate_tz('date') - tz = date[-1] - date = time.localtime(mktime_tz(date)) - if date: - print 'ParsedDate:', time.asctime(date), - hhmmss = tz - hhmm, ss = divmod(hhmmss, 60) - hh, mm = divmod(hhmm, 60) - print "%+03d%02d" % (hh, mm), - if ss: print ".%02d" % ss, - print - else: - print 'ParsedDate:', None - m.rewindbody() - n = 0 - while f.readline(): - n += 1 - print 'Lines:', n - print '-'*70 - print 'len =', len(m) - if 'Date' in m: print 'Date =', m['Date'] - if 'X-Nonsense' in m: pass - print 'keys =', m.keys() - print 'values =', m.values() - print 'items =', m.items() diff --git a/icarus-miner/data/usr/lib/python2.6/rfc822.pyc b/icarus-miner/data/usr/lib/python2.6/rfc822.pyc deleted file mode 100644 index 8f5263b..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/rfc822.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/site-packages/pyserial-2.4-py2.6.egg-info/PKG-INFO b/icarus-miner/data/usr/lib/python2.6/site-packages/pyserial-2.4-py2.6.egg-info/PKG-INFO deleted file mode 100644 index 66b5e48..0000000 --- a/icarus-miner/data/usr/lib/python2.6/site-packages/pyserial-2.4-py2.6.egg-info/PKG-INFO +++ /dev/null @@ -1,21 +0,0 @@ -Metadata-Version: 1.0 -Name: pyserial -Version: 2.4 -Summary: Python Serial Port Extension -Home-page: http://pyserial.sourceforge.net/ -Author: Chris Liechti -Author-email: cliechti@gmx.net -License: Python -Description: Python Serial Port Extension for Win32, Linux, BSD, Jython -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers -Classifier: Intended Audience :: End Users/Desktop -Classifier: License :: OSI Approved :: Python Software Foundation License -Classifier: Natural Language :: English -Classifier: Operating System :: POSIX -Classifier: Operating System :: Microsoft :: Windows -Classifier: Programming Language :: Python -Classifier: Topic :: Communications -Classifier: Topic :: Software Development :: Libraries -Classifier: Topic :: Terminals :: Serial diff --git a/icarus-miner/data/usr/lib/python2.6/site-packages/pyserial-2.4-py2.6.egg-info/SOURCES.txt b/icarus-miner/data/usr/lib/python2.6/site-packages/pyserial-2.4-py2.6.egg-info/SOURCES.txt deleted file mode 100644 index 34e2236..0000000 --- a/icarus-miner/data/usr/lib/python2.6/site-packages/pyserial-2.4-py2.6.egg-info/SOURCES.txt +++ /dev/null @@ -1,32 +0,0 @@ -CHANGES.txt -LICENSE.txt -MANIFEST -README.txt -setup.cfg -setup.py -examples/enhancedserial.py -examples/miniterm.py -examples/scan.py -examples/scanwin32.py -examples/setup-miniterm-py2exe.py -examples/setup_demo.py -examples/tcp_serial_redirect.py -examples/test.py -examples/test_advanced.py -examples/test_high_load.py -examples/wxSerialConfigDialog.py -examples/wxSerialConfigDialog.wxg -examples/wxTerminal.py -examples/wxTerminal.wxg -pyserial.egg-info/PKG-INFO -pyserial.egg-info/SOURCES.txt -pyserial.egg-info/dependency_links.txt -pyserial.egg-info/top_level.txt -serial/.cvsignore -serial/__init__.py -serial/serialcli.py -serial/serialjava.py -serial/serialposix.py -serial/serialutil.py -serial/serialwin32.py -serial/sermsdos.py \ No newline at end of file diff --git 
a/icarus-miner/data/usr/lib/python2.6/site-packages/pyserial-2.4-py2.6.egg-info/dependency_links.txt b/icarus-miner/data/usr/lib/python2.6/site-packages/pyserial-2.4-py2.6.egg-info/dependency_links.txt deleted file mode 100644 index 8b13789..0000000 --- a/icarus-miner/data/usr/lib/python2.6/site-packages/pyserial-2.4-py2.6.egg-info/dependency_links.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/icarus-miner/data/usr/lib/python2.6/site-packages/pyserial-2.4-py2.6.egg-info/top_level.txt b/icarus-miner/data/usr/lib/python2.6/site-packages/pyserial-2.4-py2.6.egg-info/top_level.txt deleted file mode 100644 index b6be06a..0000000 --- a/icarus-miner/data/usr/lib/python2.6/site-packages/pyserial-2.4-py2.6.egg-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -serial diff --git a/icarus-miner/data/usr/lib/python2.6/site-packages/serial/__init__.py b/icarus-miner/data/usr/lib/python2.6/site-packages/serial/__init__.py deleted file mode 100644 index 681ad5c..0000000 --- a/icarus-miner/data/usr/lib/python2.6/site-packages/serial/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -#portable serial port access with python -#this is a wrapper module for different platform implementations -# -# (C)2001-2002 Chris Liechti -# this is distributed under a free software license, see license.txt - -VERSION = '2.4' - -import sys - -if sys.platform == 'cli': - from serialcli import * -else: - import os - #chose an implementation, depending on os - if os.name == 'nt': #sys.platform == 'win32': - from serialwin32 import * - elif os.name == 'posix': - from serialposix import * - elif os.name == 'java': - from serialjava import * - else: - raise Exception("Sorry: no implementation for your platform ('%s') available" % os.name) - diff --git a/icarus-miner/data/usr/lib/python2.6/site-packages/serial/__init__.pyc b/icarus-miner/data/usr/lib/python2.6/site-packages/serial/__init__.pyc deleted file mode 100644 index 2ebafbc..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/site-packages/serial/__init__.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/site-packages/serial/serialcli.py b/icarus-miner/data/usr/lib/python2.6/site-packages/serial/serialcli.py deleted file mode 100644 index 9864848..0000000 --- a/icarus-miner/data/usr/lib/python2.6/site-packages/serial/serialcli.py +++ /dev/null @@ -1,247 +0,0 @@ -#! python -# Python Serial Port Extension for Win32, Linux, BSD, Jython and .NET/Mono -# serial driver for .NET/Mono (IronPython), .NET >= 2 -# see __init__.py -# -# (C) 2008 Chris Liechti -# this is distributed under a free software license, see license.txt - -import clr -import System -import System.IO.Ports -from serialutil import * - -def device(portnum): - """Turn a port number into a device name""" - return System.IO.Ports.SerialPort.GetPortNames()[portnum] - -# must invoke function with byte array, make a helper to convert strings -# to byte arrays -sab = System.Array[System.Byte] -def as_byte_array(string): - return sab([ord(x) for x in string]) - -class Serial(SerialBase): - """Serial port implemenation for .NET/Mono.""" - - BAUDRATES = (50,75,110,134,150,200,300,600,1200,1800,2400,4800,9600, - 19200,38400,57600,115200) - - def open(self): - """Open port with current settings. 
This may throw a SerialException - if the port cannot be opened.""" - if self._port is None: - raise SerialException("Port must be configured before it can be used.") - try: - self._port_handle = System.IO.Ports.SerialPort(self.portstr) - except Exception, msg: - self._port_handle = None - raise SerialException("could not open port %s: %s" % (self.portstr, msg)) - - self._reconfigurePort() - self._port_handle.Open() - self._isOpen = True - if not self._rtscts: - self.setRTS(True) - self.setDTR(True) - self.flushInput() - self.flushOutput() - - def _reconfigurePort(self): - """Set communication parameters on opened port.""" - if not self._port_handle: - raise SerialException("Can only operate on a valid port handle") - - self.ReceivedBytesThreshold = 1 - - if self._timeout is None: - self._port_handle.ReadTimeout = System.IO.Ports.SerialPort.InfiniteTimeout - else: - self._port_handle.ReadTimeout = int(self._timeout*1000) - - # if self._timeout != 0 and self._interCharTimeout is not None: - # timeouts = (int(self._interCharTimeout * 1000),) + timeouts[1:] - - if self._writeTimeout is None: - self._port_handle.WriteTimeout = System.IO.Ports.SerialPort.InfiniteTimeout - else: - self._port_handle.WriteTimeout = int(self._writeTimeout*1000) - - - # Setup the connection info. - try: - self._port_handle.BaudRate = self._baudrate - except IOError, e: - # catch errors from illegal baudrate settings - raise ValueError(str(e)) - - if self._bytesize == FIVEBITS: - self._port_handle.DataBits = 5 - elif self._bytesize == SIXBITS: - self._port_handle.DataBits = 6 - elif self._bytesize == SEVENBITS: - self._port_handle.DataBits = 7 - elif self._bytesize == EIGHTBITS: - self._port_handle.DataBits = 8 - else: - raise ValueError("Unsupported number of data bits: %r" % self._bytesize) - - if self._parity == PARITY_NONE: - self._port_handle.Parity = System.IO.Ports.Parity.None - elif self._parity == PARITY_EVEN: - self._port_handle.Parity = System.IO.Ports.Parity.Even - elif self._parity == PARITY_ODD: - self._port_handle.Parity = System.IO.Ports.Parity.Odd - elif self._parity == PARITY_MARK: - self._port_handle.Parity = System.IO.Ports.Parity.Mark - elif self._parity == PARITY_SPACE: - self._port_handle.Parity = System.IO.Ports.Parity.Space - else: - raise ValueError("Unsupported parity mode: %r" % self._parity) - - if self._stopbits == STOPBITS_ONE: - self._port_handle.StopBits = System.IO.Ports.StopBits.One - elif self._stopbits == STOPBITS_TWO: - self._port_handle.StopBits = System.IO.Ports.StopBits.Two - else: - raise ValueError("Unsupported number of stop bits: %r" % self._stopbits) - - if self._rtscts and self._xonxoff: - self._port_handle.Handshake = System.IO.Ports.Handshake.RequestToSendXOnXOff - elif self._rtscts: - self._port_handle.Handshake = System.IO.Ports.Handshake.RequestToSend - elif self._xonxoff: - self._port_handle.Handshake = System.IO.Ports.Handshake.XOnXOff - else: - self._port_handle.Handshake = System.IO.Ports.Handshake.None - - #~ def __del__(self): - #~ self.close() - - def close(self): - """Close port""" - if self._isOpen: - if self._port_handle: - try: - self._port_handle.Close() - except System.IO.Ports.InvalidOperationException: - # ignore errors. 
can happen for unplugged USB serial devices - pass - self._port_handle = None - self._isOpen = False - - def makeDeviceName(self, port): - try: - return device(port) - except TypeError, e: - raise SerialException(str(e)) - - # - - - - - - - - - - - - - - - - - - - - - - - - - - def inWaiting(self): - """Return the number of characters currently in the input buffer.""" - if not self._port_handle: raise portNotOpenError - return self._port_handle.BytesToRead - - def read(self, size=1): - """Read size bytes from the serial port. If a timeout is set it may - return fewer characters than requested. With no timeout it will block - until the requested number of bytes is read.""" - if not self._port_handle: raise portNotOpenError - # must use single byte reads as this is the only way to read - # without applying encodings - data = [] - while size: - try: - data.append(chr(self._port_handle.ReadByte())) - except System.TimeoutException, e: - break - else: - size -= 1 - return ''.join(data) - - def write(self, data): - """Output the given string over the serial port.""" - if not self._port_handle: raise portNotOpenError - if not isinstance(data, str): - raise TypeError('expected str, got %s' % type(data)) - try: - # must call overloaded method with byte array argument - # as this is the only one not applying encodings - self._port_handle.Write(as_byte_array(data), 0, len(data)) - except System.TimeoutException, e: - raise writeTimeoutError - - def flushInput(self): - """Clear input buffer, discarding all that is in the buffer.""" - if not self._port_handle: raise portNotOpenError - self._port_handle.DiscardInBuffer() - - def flushOutput(self): - """Clear output buffer, aborting the current output and - discarding all that is in the buffer.""" - if not self._port_handle: raise portNotOpenError - self._port_handle.DiscardOutBuffer() - - def sendBreak(self, duration=0.25): - """Send break condition. Timed, returns to idle state after given duration.""" - if not self._port_handle: raise portNotOpenError - import time - self._port_handle.BreakState = True - time.sleep(duration) - self._port_handle.BreakState = False - - def setBreak(self, level=True): - """Set break: Controls TXD. When active, no transmitting is possible.""" - if not self._port_handle: raise portNotOpenError - self._port_handle.BreakState = bool(level) - - def setRTS(self, level=True): - """Set terminal status line: Request To Send""" - if not self._port_handle: raise portNotOpenError - self._port_handle.RtsEnable = bool(level) - - def setDTR(self, level=True): - """Set terminal status line: Data Terminal Ready""" - if not self._port_handle: raise portNotOpenError - self._port_handle.DtrEnable = bool(level) - - def getCTS(self): - """Read terminal status line: Clear To Send""" - if not self._port_handle: raise portNotOpenError - return self._port_handle.CtsHolding - - def getDSR(self): - """Read terminal status line: Data Set Ready""" - if not self._port_handle: raise portNotOpenError - return self._port_handle.DsrHolding - - def getRI(self): - """Read terminal status line: Ring Indicator""" - if not self._port_handle: raise portNotOpenError - #~ return self._port_handle.XXX - return False #XXX an error would be better - - def getCD(self): - """Read terminal status line: Carrier Detect""" - if not self._port_handle: raise portNotOpenError - return self._port_handle.CDHolding - - # - - platform specific - - - - - -#Test function only!!
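Before the smoke test that follows, a hedged sketch of how this .NET/Mono backend is driven through the common pyserial 2.4 surface (the port number, payload, and timeout are illustrative assumptions, not values from this package):

    import serial

    s = serial.Serial(0, baudrate=115200, timeout=1)  # first enumerated COM port
    s.write('ping')               # pyserial 2.4 works with byte strings (str)
    reply = s.read(4)             # may return a short string on timeout
    print repr(reply), s.inWaiting()
    s.close()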
-if __name__ == '__main__': - s = Serial(0) - print s - - s = Serial() - print s - - - s.baudrate = 19200 - s.databits = 7 - s.close() - s.port = 0 - s.open() - print s - diff --git a/icarus-miner/data/usr/lib/python2.6/site-packages/serial/serialjava.py b/icarus-miner/data/usr/lib/python2.6/site-packages/serial/serialjava.py deleted file mode 100644 index cca46dc..0000000 --- a/icarus-miner/data/usr/lib/python2.6/site-packages/serial/serialjava.py +++ /dev/null @@ -1,240 +0,0 @@ -#!jython -# -# Python Serial Port Extension for Win32, Linux, BSD, Jython -# module for serial IO for Jython and JavaComm -# see __init__.py -# -# (C) 2002-2008 Chris Liechti -# this is distributed under a free software license, see license.txt - -from serialutil import * - -def my_import(name): - mod = __import__(name) - components = name.split('.') - for comp in components[1:]: - mod = getattr(mod, comp) - return mod - -def detect_java_comm(names): - """try given list of modules and return that imports""" - for name in names: - try: - mod = my_import(name) - mod.SerialPort - return mod - except (ImportError, AttributeError): - pass - raise ImportError("No Java Communications API implementation found") - -# Java Communications API implementations -# http://mho.republika.pl/java/comm/ - -comm = detect_java_comm([ - 'javax.comm', # Sun/IBM - 'gnu.io', # RXTX -]) - - -def device(portnumber): - """Turn a port number into a device name""" - enum = comm.CommPortIdentifier.getPortIdentifiers() - ports = [] - while enum.hasMoreElements(): - el = enum.nextElement() - if el.getPortType() == comm.CommPortIdentifier.PORT_SERIAL: - ports.append(el) - return ports[portnumber].getName() - -class Serial(SerialBase): - """Serial port class, implemented with Java Communications API and - thus usable with jython and the appropriate java extension.""" - - def open(self): - """Open port with current settings. 
This may throw a SerialException - if the port cannot be opened.""" - if self._port is None: - raise SerialException("Port must be configured before it can be used.") - if type(self._port) == type(''): #strings are taken directly - portId = comm.CommPortIdentifier.getPortIdentifier(self._port) - else: - portId = comm.CommPortIdentifier.getPortIdentifier(device(self._port)) #numbers are transformed to a comportid obj - try: - self.sPort = portId.open("python serial module", 10) - except Exception, msg: - self.sPort = None - raise SerialException("Could not open port: %s" % msg) - self._reconfigurePort() - self._instream = self.sPort.getInputStream() - self._outstream = self.sPort.getOutputStream() - self._isOpen = True - - def _reconfigurePort(self): - """Set commuication parameters on opened port.""" - if not self.sPort: - raise SerialException("Can only operate on a valid port handle") - - self.sPort.enableReceiveTimeout(30) - if self._bytesize == FIVEBITS: - jdatabits = comm.SerialPort.DATABITS_5 - elif self._bytesize == SIXBITS: - jdatabits = comm.SerialPort.DATABITS_6 - elif self._bytesize == SEVENBITS: - jdatabits = comm.SerialPort.DATABITS_7 - elif self._bytesize == EIGHTBITS: - jdatabits = comm.SerialPort.DATABITS_8 - else: - raise ValueError("unsupported bytesize: %r" % self._bytesize) - - if self._stopbits == STOPBITS_ONE: - jstopbits = comm.SerialPort.STOPBITS_1 - elif stopbits == STOPBITS_ONE_HALVE: - self._jstopbits = comm.SerialPort.STOPBITS_1_5 - elif self._stopbits == STOPBITS_TWO: - jstopbits = comm.SerialPort.STOPBITS_2 - else: - raise ValueError("unsupported number of stopbits: %r" % self._stopbits) - - if self._parity == PARITY_NONE: - jparity = comm.SerialPort.PARITY_NONE - elif self._parity == PARITY_EVEN: - jparity = comm.SerialPort.PARITY_EVEN - elif self._parity == PARITY_ODD: - jparity = comm.SerialPort.PARITY_ODD - elif self._parity == PARITY_MARK: - jparity = comm.SerialPort.PARITY_MARK - elif self._parity == PARITY_SPACE: - jparity = comm.SerialPort.PARITY_SPACE - else: - raise ValueError("unsupported parity type: %r" % self._parity) - - jflowin = jflowout = 0 - if self._rtscts: - jflowin |= comm.SerialPort.FLOWCONTROL_RTSCTS_IN - jflowout |= comm.SerialPort.FLOWCONTROL_RTSCTS_OUT - if self._xonxoff: - jflowin |= comm.SerialPort.FLOWCONTROL_XONXOFF_IN - jflowout |= comm.SerialPort.FLOWCONTROL_XONXOFF_OUT - - self.sPort.setSerialPortParams(baudrate, jdatabits, jstopbits, jparity) - self.sPort.setFlowControlMode(jflowin | jflowout) - - if self._timeout >= 0: - self.sPort.enableReceiveTimeout(self._timeout*1000) - else: - self.sPort.disableReceiveTimeout() - - def close(self): - """Close port""" - if self._isOpen: - if self.sPort: - self._instream.close() - self._outstream.close() - self.sPort.close() - self.sPort = None - self._isOpen = False - - def makeDeviceName(self, port): - return device(port) - - # - - - - - - - - - - - - - - - - - - - - - - - - - - def inWaiting(self): - """Return the number of characters currently in the input buffer.""" - if not self.sPort: raise portNotOpenError - return self._instream.available() - - def read(self, size=1): - """Read size bytes from the serial port. If a timeout is set it may - return less characters as requested. 
With no timeout it will block - until the requested number of bytes is read.""" - if not self.sPort: raise portNotOpenError - read = '' - if size > 0: - while len(read) < size: - x = self._instream.read() - if x == -1: - if self.timeout >= 0: - break - else: - read = read + chr(x) - return read - - def write(self, data): - """Output the given string over the serial port.""" - if not self.sPort: raise portNotOpenError - self._outstream.write(data) - - def flushInput(self): - """Clear input buffer, discarding all that is in the buffer.""" - if not self.sPort: raise portNotOpenError - self._instream.skip(self._instream.available()) - - def flushOutput(self): - """Clear output buffer, aborting the current output and - discarding all that is in the buffer.""" - if not self.sPort: raise portNotOpenError - self._outstream.flush() - - def sendBreak(self, duration=0.25): - """Send break condition. Timed, returns to idle state after given duration.""" - if not self.sPort: raise portNotOpenError - self.sPort.sendBreak(duration*1000.0) - - def setBreak(self, level=1): - """Set break: Controls TXD. When active, to transmitting is possible.""" - if self.fd is None: raise portNotOpenError - raise SerialException("The setBreak function is not implemented in java.") - - def setRTS(self, level=1): - """Set terminal status line: Request To Send""" - if not self.sPort: raise portNotOpenError - self.sPort.setRTS(level) - - def setDTR(self, level=1): - """Set terminal status line: Data Terminal Ready""" - if not self.sPort: raise portNotOpenError - self.sPort.setDTR(level) - - def getCTS(self): - """Read terminal status line: Clear To Send""" - if not self.sPort: raise portNotOpenError - self.sPort.isCTS() - - def getDSR(self): - """Read terminal status line: Data Set Ready""" - if not self.sPort: raise portNotOpenError - self.sPort.isDSR() - - def getRI(self): - """Read terminal status line: Ring Indicator""" - if not self.sPort: raise portNotOpenError - self.sPort.isRI() - - def getCD(self): - """Read terminal status line: Carrier Detect""" - if not self.sPort: raise portNotOpenError - self.sPort.isCD() - - - -if __name__ == '__main__': - s = Serial(0, - baudrate=19200, #baudrate - bytesize=EIGHTBITS, #number of databits - parity=PARITY_EVEN, #enable parity checking - stopbits=STOPBITS_ONE, #number of stopbits - timeout=3, #set a timeout value, None for waiting forever - xonxoff=0, #enable software flow control - rtscts=0, #enable RTS/CTS flow control - ) - s.setRTS(1) - s.setDTR(1) - s.flushInput() - s.flushOutput() - s.write('hello') - print repr(s.read(5)) - print s.inWaiting() - del s - - diff --git a/icarus-miner/data/usr/lib/python2.6/site-packages/serial/serialposix.py b/icarus-miner/data/usr/lib/python2.6/site-packages/serial/serialposix.py deleted file mode 100644 index 174e2f7..0000000 --- a/icarus-miner/data/usr/lib/python2.6/site-packages/serial/serialposix.py +++ /dev/null @@ -1,492 +0,0 @@ -#!/usr/bin/env python -# Python Serial Port Extension for Win32, Linux, BSD, Jython -# module for serial IO for POSIX compatible systems, like Linux -# see __init__.py -# -# (C) 2001-2008 Chris Liechti -# this is distributed under a free software license, see license.txt -# -# parts based on code from Grant B. Edwards : -# ftp://ftp.visi.com/users/grante/python/PosixSerial.py -# references: http://www.easysw.com/~mike/serial/serial.html - -import sys, os, fcntl, termios, struct, select, errno -from serialutil import * - -#Do check the Python version as some constants have moved. 
-if (sys.hexversion < 0x020100f0): - import TERMIOS -else: - TERMIOS = termios - -if (sys.hexversion < 0x020200f0): - import FCNTL -else: - FCNTL = fcntl - -#try to detect the os so that a device can be selected... -plat = sys.platform.lower() - -if plat[:5] == 'linux': #Linux (confirmed) - def device(port): - return '/dev/ttyS%d' % port - -elif plat == 'cygwin': #cygwin/win32 (confirmed) - def device(port): - return '/dev/com%d' % (port + 1) - -elif plat == 'openbsd3': #BSD (confirmed) - def device(port): - return '/dev/ttyp%d' % port - -elif plat[:3] == 'bsd' or \ - plat[:7] == 'freebsd' or \ - plat[:7] == 'openbsd' or \ - plat[:6] == 'darwin': #BSD (confirmed for freebsd4: cuaa%d) - def device(port): - return '/dev/cuad%d' % port - -elif plat[:6] == 'netbsd': #NetBSD 1.6 testing by Erk - def device(port): - return '/dev/dty%02d' % port - -elif plat[:4] == 'irix': #IRIX (partially tested) - def device(port): - return '/dev/ttyf%d' % (port+1) #XXX different device names depending on flow control - -elif plat[:2] == 'hp': #HP-UX (not tested) - def device(port): - return '/dev/tty%dp0' % (port+1) - -elif plat[:5] == 'sunos': #Solaris/SunOS (confirmed) - def device(port): - return '/dev/tty%c' % (ord('a')+port) - -elif plat[:3] == 'aix': #aix - def device(port): - return '/dev/tty%d' % (port) - -else: - #platform detection has failed... - print """don't know how to number ttys on this system. -! Use an explicit path (e.g. /dev/ttyS1) or send this information to -! the author of this module: - -sys.platform = %r -os.name = %r -serialposix.py version = %s - -also add the device name of the serial port and where the -counting starts for the first serial port. -e.g. 'first serial port: /dev/ttyS0' -and with a bit of luck you can get this module running... -""" % (sys.platform, os.name, VERSION) - #no exception, just continue with a brave attempt to build a device name; -#even if the device name is not correct for the platform, it still has a chance -#to work when a string with the real device name is passed as the port parameter. - def device(portnum): - return '/dev/ttyS%d' % portnum - #~ raise Exception, "this module does not run on this platform, sorry." - -#what's up with "aix", "beos", .... -#they should work, just need to know the device names. - - -#load some constants for later use.
-#try to use values from TERMIOS, use defaults from linux otherwise -TIOCMGET = hasattr(TERMIOS, 'TIOCMGET') and TERMIOS.TIOCMGET or 0x5415 -TIOCMBIS = hasattr(TERMIOS, 'TIOCMBIS') and TERMIOS.TIOCMBIS or 0x5416 -TIOCMBIC = hasattr(TERMIOS, 'TIOCMBIC') and TERMIOS.TIOCMBIC or 0x5417 -TIOCMSET = hasattr(TERMIOS, 'TIOCMSET') and TERMIOS.TIOCMSET or 0x5418 - -#TIOCM_LE = hasattr(TERMIOS, 'TIOCM_LE') and TERMIOS.TIOCM_LE or 0x001 -TIOCM_DTR = hasattr(TERMIOS, 'TIOCM_DTR') and TERMIOS.TIOCM_DTR or 0x002 -TIOCM_RTS = hasattr(TERMIOS, 'TIOCM_RTS') and TERMIOS.TIOCM_RTS or 0x004 -#TIOCM_ST = hasattr(TERMIOS, 'TIOCM_ST') and TERMIOS.TIOCM_ST or 0x008 -#TIOCM_SR = hasattr(TERMIOS, 'TIOCM_SR') and TERMIOS.TIOCM_SR or 0x010 - -TIOCM_CTS = hasattr(TERMIOS, 'TIOCM_CTS') and TERMIOS.TIOCM_CTS or 0x020 -TIOCM_CAR = hasattr(TERMIOS, 'TIOCM_CAR') and TERMIOS.TIOCM_CAR or 0x040 -TIOCM_RNG = hasattr(TERMIOS, 'TIOCM_RNG') and TERMIOS.TIOCM_RNG or 0x080 -TIOCM_DSR = hasattr(TERMIOS, 'TIOCM_DSR') and TERMIOS.TIOCM_DSR or 0x100 -TIOCM_CD = hasattr(TERMIOS, 'TIOCM_CD') and TERMIOS.TIOCM_CD or TIOCM_CAR -TIOCM_RI = hasattr(TERMIOS, 'TIOCM_RI') and TERMIOS.TIOCM_RI or TIOCM_RNG -#TIOCM_OUT1 = hasattr(TERMIOS, 'TIOCM_OUT1') and TERMIOS.TIOCM_OUT1 or 0x2000 -#TIOCM_OUT2 = hasattr(TERMIOS, 'TIOCM_OUT2') and TERMIOS.TIOCM_OUT2 or 0x4000 -TIOCINQ = hasattr(TERMIOS, 'FIONREAD') and TERMIOS.FIONREAD or 0x541B - -TIOCM_zero_str = struct.pack('I', 0) -TIOCM_RTS_str = struct.pack('I', TIOCM_RTS) -TIOCM_DTR_str = struct.pack('I', TIOCM_DTR) - -TIOCSBRK = hasattr(TERMIOS, 'TIOCSBRK') and TERMIOS.TIOCSBRK or 0x5427 -TIOCCBRK = hasattr(TERMIOS, 'TIOCCBRK') and TERMIOS.TIOCCBRK or 0x5428 - -ASYNC_SPD_MASK = 0x1030 -ASYNC_SPD_CUST = 0x0030 - -baudrate_constants = { - 0: 0000000, # hang up - 50: 0000001, - 75: 0000002, - 110: 0000003, - 134: 0000004, - 150: 0000005, - 200: 0000006, - 300: 0000007, - 600: 0000010, - 1200: 0000011, - 1800: 0000012, - 2400: 0000013, - 4800: 0000014, - 9600: 0000015, - 19200: 0000016, - 38400: 0000017, - 57600: 0010001, - 115200: 0010002, - 230400: 0010003, - 460800: 0010004, - 500000: 0010005, - 576000: 0010006, - 921600: 0010007, - 1000000: 0010010, - 1152000: 0010011, - 1500000: 0010012, - 2000000: 0010013, - 2500000: 0010014, - 3000000: 0010015, - 3500000: 0010016, - 4000000: 0010017 -} - - -class Serial(SerialBase): - """Serial port class POSIX implementation. Serial port configuration is - done with termios and fcntl. Runs on Linux and many other Un*x like - systems.""" - - def open(self): - """Open port with current settings. 
This may throw a SerialException - if the port cannot be opened.""" - if self._port is None: - raise SerialException("Port must be configured before it can be used.") - self.fd = None - #open - try: - self.fd = os.open(self.portstr, os.O_RDWR|os.O_NOCTTY|os.O_NONBLOCK) - except Exception, msg: - self.fd = None - raise SerialException("could not open port %s: %s" % (self._port, msg)) - #~ fcntl.fcntl(self.fd, FCNTL.F_SETFL, 0) #set blocking - - try: - self._reconfigurePort() - except: - os.close(self.fd) - self.fd = None - else: - self._isOpen = True - #~ self.flushInput() - - - def _reconfigurePort(self): - """Set communication parameters on opened port.""" - if self.fd is None: - raise SerialException("Can only operate on a valid port handle") - custom_baud = None - - vmin = vtime = 0 #timeout is done via select - if self._interCharTimeout is not None: - vmin = 1 - vtime = int(self._interCharTimeout * 10) - try: - iflag, oflag, cflag, lflag, ispeed, ospeed, cc = termios.tcgetattr(self.fd) - except termios.error, msg: #if a port is nonexistent but has a /dev file, it'll fail here - raise SerialException("Could not configure port: %s" % msg) - #set up raw mode / no echo / binary - cflag |= (TERMIOS.CLOCAL|TERMIOS.CREAD) - lflag &= ~(TERMIOS.ICANON|TERMIOS.ECHO|TERMIOS.ECHOE|TERMIOS.ECHOK|TERMIOS.ECHONL| - TERMIOS.ISIG|TERMIOS.IEXTEN) #|TERMIOS.ECHOPRT - for flag in ('ECHOCTL', 'ECHOKE'): #netbsd workaround for Erk - if hasattr(TERMIOS, flag): - lflag &= ~getattr(TERMIOS, flag) - - oflag &= ~(TERMIOS.OPOST) - iflag &= ~(TERMIOS.INLCR|TERMIOS.IGNCR|TERMIOS.ICRNL|TERMIOS.IGNBRK) - if hasattr(TERMIOS, 'IUCLC'): - iflag &= ~TERMIOS.IUCLC - if hasattr(TERMIOS, 'PARMRK'): - iflag &= ~TERMIOS.PARMRK - - #setup baudrate - try: - ispeed = ospeed = getattr(TERMIOS,'B%s' % (self._baudrate)) - except AttributeError: - try: - ispeed = ospeed = baudrate_constants[self._baudrate] - except KeyError: - #~ raise ValueError('Invalid baud rate: %r' % self._baudrate) - # may need custom baud rate, it isnt in our list. 
- ispeed = ospeed = getattr(TERMIOS, 'B38400') - custom_baud = int(self._baudrate) # store for later - - #setup char len - cflag &= ~TERMIOS.CSIZE - if self._bytesize == 8: - cflag |= TERMIOS.CS8 - elif self._bytesize == 7: - cflag |= TERMIOS.CS7 - elif self._bytesize == 6: - cflag |= TERMIOS.CS6 - elif self._bytesize == 5: - cflag |= TERMIOS.CS5 - else: - raise ValueError('Invalid char len: %r' % self._bytesize) - #setup stopbits - if self._stopbits == STOPBITS_ONE: - cflag &= ~(TERMIOS.CSTOPB) - elif self._stopbits == STOPBITS_TWO: - cflag |= (TERMIOS.CSTOPB) - else: - raise ValueError('Invalid stopit specification: %r' % self._stopbits) - #setup parity - iflag &= ~(TERMIOS.INPCK|TERMIOS.ISTRIP) - if self._parity == PARITY_NONE: - cflag &= ~(TERMIOS.PARENB|TERMIOS.PARODD) - elif self._parity == PARITY_EVEN: - cflag &= ~(TERMIOS.PARODD) - cflag |= (TERMIOS.PARENB) - elif self._parity == PARITY_ODD: - cflag |= (TERMIOS.PARENB|TERMIOS.PARODD) - else: - raise ValueError('Invalid parity: %r' % self._parity) - #setup flow control - #xonxoff - if hasattr(TERMIOS, 'IXANY'): - if self._xonxoff: - iflag |= (TERMIOS.IXON|TERMIOS.IXOFF) #|TERMIOS.IXANY) - else: - iflag &= ~(TERMIOS.IXON|TERMIOS.IXOFF|TERMIOS.IXANY) - else: - if self._xonxoff: - iflag |= (TERMIOS.IXON|TERMIOS.IXOFF) - else: - iflag &= ~(TERMIOS.IXON|TERMIOS.IXOFF) - #rtscts - if hasattr(TERMIOS, 'CRTSCTS'): - if self._rtscts: - cflag |= (TERMIOS.CRTSCTS) - else: - cflag &= ~(TERMIOS.CRTSCTS) - elif hasattr(TERMIOS, 'CNEW_RTSCTS'): #try it with alternate constant name - if self._rtscts: - cflag |= (TERMIOS.CNEW_RTSCTS) - else: - cflag &= ~(TERMIOS.CNEW_RTSCTS) - #XXX should there be a warning if setting up rtscts (and xonxoff etc) fails?? - - #buffer - #vmin "minimal number of characters to be read. = for non blocking" - if vmin < 0 or vmin > 255: - raise ValueError('Invalid vmin: %r ' % vmin) - cc[TERMIOS.VMIN] = vmin - #vtime - if vtime < 0 or vtime > 255: - raise ValueError('Invalid vtime: %r' % vtime) - cc[TERMIOS.VTIME] = vtime - #activate settings - termios.tcsetattr(self.fd, TERMIOS.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc]) - - # apply custom baud rate, if any - if custom_baud is not None: - import array - buf = array.array('i', [0] * 32) - - # get serial_struct - FCNTL.ioctl(self.fd, TERMIOS.TIOCGSERIAL, buf) - - # set custom divisor - buf[6] = buf[7] / custom_baud - - # update flags - buf[4] &= ~ASYNC_SPD_MASK - buf[4] |= ASYNC_SPD_CUST - - # set serial_struct - try: - res = FCNTL.ioctl(self.fd, TERMIOS.TIOCSSERIAL, buf) - except IOError: - raise ValueError('Failed to set custom baud rate: %r' % self._baudrate) - - def close(self): - """Close port""" - if self._isOpen: - if self.fd is not None: - os.close(self.fd) - self.fd = None - self._isOpen = False - - def makeDeviceName(self, port): - return device(port) - - # - - - - - - - - - - - - - - - - - - - - - - - - - - def inWaiting(self): - """Return the number of characters currently in the input buffer.""" - #~ s = fcntl.ioctl(self.fd, TERMIOS.FIONREAD, TIOCM_zero_str) - s = fcntl.ioctl(self.fd, TIOCINQ, TIOCM_zero_str) - return struct.unpack('I',s)[0] - - def read(self, size=1): - """Read size bytes from the serial port. If a timeout is set it may - return less characters as requested. 
With no timeout it will block - until the requested number of bytes is read.""" - if self.fd is None: raise portNotOpenError - read = '' - inp = None - if size > 0: - while len(read) < size: - #print "\tread(): size",size, "have", len(read) #debug - ready,_,_ = select.select([self.fd],[],[], self._timeout) - if not ready: - break #timeout - buf = os.read(self.fd, size-len(read)) - read = read + buf - if (self._timeout >= 0 or self._interCharTimeout > 0) and not buf: - break #early abort on timeout - return read - - def write(self, data): - """Output the given string over the serial port.""" - if self.fd is None: raise portNotOpenError - if not isinstance(data, str): - raise TypeError('expected str, got %s' % type(data)) - t = len(data) - d = data - while t > 0: - try: - if self._writeTimeout is not None and self._writeTimeout > 0: - _,ready,_ = select.select([],[self.fd],[], self._writeTimeout) - if not ready: - raise writeTimeoutError - n = os.write(self.fd, d) - if self._writeTimeout is not None and self._writeTimeout > 0: - _,ready,_ = select.select([],[self.fd],[], self._writeTimeout) - if not ready: - raise writeTimeoutError - d = d[n:] - t = t - n - except OSError,v: - if v.errno != errno.EAGAIN: - raise - - def flush(self): - """Flush of file like objects. In this case, wait until all data - is written.""" - self.drainOutput() - - def flushInput(self): - """Clear input buffer, discarding all that is in the buffer.""" - if self.fd is None: - raise portNotOpenError - termios.tcflush(self.fd, TERMIOS.TCIFLUSH) - - def flushOutput(self): - """Clear output buffer, aborting the current output and - discarding all that is in the buffer.""" - if self.fd is None: - raise portNotOpenError - termios.tcflush(self.fd, TERMIOS.TCOFLUSH) - - def sendBreak(self, duration=0.25): - """Send break condition. Timed, returns to idle state after given duration.""" - if self.fd is None: - raise portNotOpenError - termios.tcsendbreak(self.fd, int(duration/0.25)) - - def setBreak(self, level=1): - """Set break: Controls TXD. 
When active, to transmitting is possible.""" - if self.fd is None: raise portNotOpenError - if level: - fcntl.ioctl(self.fd, TIOCSBRK) - else: - fcntl.ioctl(self.fd, TIOCCBRK) - - def setRTS(self, level=1): - """Set terminal status line: Request To Send""" - if self.fd is None: raise portNotOpenError - if level: - fcntl.ioctl(self.fd, TIOCMBIS, TIOCM_RTS_str) - else: - fcntl.ioctl(self.fd, TIOCMBIC, TIOCM_RTS_str) - - def setDTR(self, level=1): - """Set terminal status line: Data Terminal Ready""" - if self.fd is None: raise portNotOpenError - if level: - fcntl.ioctl(self.fd, TIOCMBIS, TIOCM_DTR_str) - else: - fcntl.ioctl(self.fd, TIOCMBIC, TIOCM_DTR_str) - - def getCTS(self): - """Read terminal status line: Clear To Send""" - if self.fd is None: raise portNotOpenError - s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str) - return struct.unpack('I',s)[0] & TIOCM_CTS != 0 - - def getDSR(self): - """Read terminal status line: Data Set Ready""" - if self.fd is None: raise portNotOpenError - s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str) - return struct.unpack('I',s)[0] & TIOCM_DSR != 0 - - def getRI(self): - """Read terminal status line: Ring Indicator""" - if self.fd is None: raise portNotOpenError - s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str) - return struct.unpack('I',s)[0] & TIOCM_RI != 0 - - def getCD(self): - """Read terminal status line: Carrier Detect""" - if self.fd is None: raise portNotOpenError - s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str) - return struct.unpack('I',s)[0] & TIOCM_CD != 0 - - # - - platform specific - - - - - - def drainOutput(self): - """internal - not portable!""" - if self.fd is None: raise portNotOpenError - termios.tcdrain(self.fd) - - def nonblocking(self): - """internal - not portable!""" - if self.fd is None: - raise portNotOpenError - fcntl.fcntl(self.fd, FCNTL.F_SETFL, FCNTL.O_NONBLOCK) - - def fileno(self): - """For easier of the serial port instance with select. - WARNING: this function is not portable to different platforms!""" - if self.fd is None: raise portNotOpenError - return self.fd - -if __name__ == '__main__': - s = Serial(0, - baudrate=19200, #baudrate - bytesize=EIGHTBITS, #number of databits - parity=PARITY_EVEN, #enable parity checking - stopbits=STOPBITS_ONE, #number of stopbits - timeout=3, #set a timeout value, None for waiting forever - xonxoff=0, #enable software flow control - rtscts=0, #enable RTS/CTS flow control - ) - s.setRTS(1) - s.setDTR(1) - s.flushInput() - s.flushOutput() - s.write('hello') - print repr(s.read(5)) - print s.inWaiting() - del s - diff --git a/icarus-miner/data/usr/lib/python2.6/site-packages/serial/serialposix.pyc b/icarus-miner/data/usr/lib/python2.6/site-packages/serial/serialposix.pyc deleted file mode 100644 index 999a067..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/site-packages/serial/serialposix.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/site-packages/serial/serialutil.py b/icarus-miner/data/usr/lib/python2.6/site-packages/serial/serialutil.py deleted file mode 100644 index fd466f2..0000000 --- a/icarus-miner/data/usr/lib/python2.6/site-packages/serial/serialutil.py +++ /dev/null @@ -1,400 +0,0 @@ -#! 
python -# Python Serial Port Extension for Win32, Linux, BSD, Jython -# see __init__.py -# -# (C) 2001-2008 Chris Liechti -# this is distributed under a free software license, see license.txt - -PARITY_NONE, PARITY_EVEN, PARITY_ODD, PARITY_MARK, PARITY_SPACE = 'N', 'E', 'O', 'M', 'S' -STOPBITS_ONE, STOPBITS_TWO = (1, 2) -FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS = (5,6,7,8) - -PARITY_NAMES = { - PARITY_NONE: 'None', - PARITY_EVEN: 'Even', - PARITY_ODD: 'Odd', - PARITY_MARK: 'Mark', - PARITY_SPACE:'Space', -} - -XON = chr(17) -XOFF = chr(19) - -#Python < 2.2.3 compatibility -try: - True -except: - True = 1 - False = not True - -class SerialException(Exception): - """Base class for serial port related exceptions.""" - -portNotOpenError = SerialException('Port not open') - -class SerialTimeoutException(SerialException): - """Write timeouts give an exception""" - -writeTimeoutError = SerialTimeoutException("Write timeout") - -class FileLike(object): - """An abstract file like class. - - This class implements readline and readlines based on read and - writelines based on write. - This class is used to provide the above functions for to Serial - port objects. - - Note that when the serial port was opened with _NO_ timeout that - readline blocks until it sees a newline (or the specified size is - reached) and that readlines would never return and therefore - refuses to work (it raises an exception in this case)! - """ - - def read(self, size): raise NotImplementedError - def write(self, s): raise NotImplementedError - - def readline(self, size=None, eol='\n'): - """read a line which is terminated with end-of-line (eol) character - ('\n' by default) or until timeout""" - line = '' - while 1: - c = self.read(1) - if c: - line += c #not very efficient but lines are usually not that long - if c == eol: - break - if size is not None and len(line) >= size: - break - else: - break - return line - - def readlines(self, sizehint=None, eol='\n'): - """read a list of lines, until timeout - sizehint is ignored""" - if self.timeout is None: - raise ValueError, "Serial port MUST have enabled timeout for this function!" - lines = [] - while 1: - line = self.readline(eol=eol) - if line: - lines.append(line) - if line[-1] != eol: #was the line received with a timeout? - break - else: - break - return lines - - def xreadlines(self, sizehint=None): - """just call readlines - here for compatibility""" - return self.readlines() - - def writelines(self, sequence): - for line in sequence: - self.write(line) - - def flush(self): - """flush of file like objects""" - pass - - # iterator for e.g. "for line in Serial(0): ..." usage - def next(self): - line = self.readline() - if not line: raise StopIteration - return line - - def __iter__(self): - return self - - -class SerialBase(FileLike): - """Serial port base class. Provides __init__ function and properties to - get/set port settings.""" - - #default values, may be overriden in subclasses that do not support all values - BAUDRATES = (50,75,110,134,150,200,300,600,1200,1800,2400,4800,9600, - 19200,38400,57600,115200,230400,460800,500000,576000,921600, - 1000000,1152000,1500000,2000000,2500000,3000000,3500000,4000000) - BYTESIZES = (FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS) - PARITIES = (PARITY_NONE, PARITY_EVEN, PARITY_ODD) - STOPBITS = (STOPBITS_ONE, STOPBITS_TWO) - - def __init__(self, - port = None, #number of device, numbering starts at - #zero. 
if everything fails, the user - #can specify a device string, note - #that this isn't portable anymore - #port will be opened if one is specified - baudrate=9600, #baudrate - bytesize=EIGHTBITS, #number of databits - parity=PARITY_NONE, #enable parity checking - stopbits=STOPBITS_ONE, #number of stopbits - timeout=None, #set a timeout value, None to wait forever - xonxoff=0, #enable software flow control - rtscts=0, #enable RTS/CTS flow control - writeTimeout=None, #set a timeout for writes - dsrdtr=None, #None: use rtscts setting, dsrdtr override if true or false - interCharTimeout=None #Inter-character timeout, None to disable - ): - """Initialize comm port object. If a port is given, then the port will be - opened immediately. Otherwise a Serial port object in closed state - is returned.""" - - self._isOpen = False - self._port = None #correct value is assigned below trough properties - self._baudrate = None #correct value is assigned below trough properties - self._bytesize = None #correct value is assigned below trough properties - self._parity = None #correct value is assigned below trough properties - self._stopbits = None #correct value is assigned below trough properties - self._timeout = None #correct value is assigned below trough properties - self._writeTimeout = None #correct value is assigned below trough properties - self._xonxoff = None #correct value is assigned below trough properties - self._rtscts = None #correct value is assigned below trough properties - self._dsrdtr = None #correct value is assigned below trough properties - self._interCharTimeout = None #correct value is assigned below trough properties - - #assign values using get/set methods using the properties feature - self.port = port - self.baudrate = baudrate - self.bytesize = bytesize - self.parity = parity - self.stopbits = stopbits - self.timeout = timeout - self.writeTimeout = writeTimeout - self.xonxoff = xonxoff - self.rtscts = rtscts - self.dsrdtr = dsrdtr - self.interCharTimeout = interCharTimeout - - if port is not None: - self.open() - - def isOpen(self): - """Check if the port is opened.""" - return self._isOpen - - # - - - - - - - - - - - - - - - - - - - - - - - - - - #TODO: these are not realy needed as the is the BAUDRATES etc attribute... - #maybe i remove them before the final release... - - def getSupportedBaudrates(self): - return [(str(b), b) for b in self.BAUDRATES] - - def getSupportedByteSizes(self): - return [(str(b), b) for b in self.BYTESIZES] - - def getSupportedStopbits(self): - return [(str(b), b) for b in self.STOPBITS] - - def getSupportedParities(self): - return [(PARITY_NAMES[b], b) for b in self.PARITIES] - - # - - - - - - - - - - - - - - - - - - - - - - - - - - def setPort(self, port): - """Change the port. The attribute portstr is set to a string that - contains the name of the port.""" - - was_open = self._isOpen - if was_open: self.close() - if port is not None: - if type(port) in [type(''), type(u'')]: #strings are taken directly - self.portstr = port - else: - self.portstr = self.makeDeviceName(port) - else: - self.portstr = None - self._port = port - if was_open: self.open() - - def getPort(self): - """Get the current port setting. The value that was passed on init or using - setPort() is passed back. See also the attribute portstr which contains - the name of the port as a string.""" - return self._port - - port = property(getPort, setPort, doc="Port setting") - - - def setBaudrate(self, baudrate): - """Change baudrate. 
It raises a ValueError if the port is open and the - baudrate is not possible. If the port is closed, then tha value is - accepted and the exception is raised when the port is opened.""" - #~ if baudrate not in self.BAUDRATES: raise ValueError("Not a valid baudrate: %r" % baudrate) - try: - self._baudrate = int(baudrate) - except TypeError: - raise ValueError("Not a valid baudrate: %r" % (baudrate,)) - else: - if self._isOpen: self._reconfigurePort() - - def getBaudrate(self): - """Get the current baudrate setting.""" - return self._baudrate - - baudrate = property(getBaudrate, setBaudrate, doc="Baudrate setting") - - - def setByteSize(self, bytesize): - """Change byte size.""" - if bytesize not in self.BYTESIZES: raise ValueError("Not a valid byte size: %r" % (bytesize,)) - self._bytesize = bytesize - if self._isOpen: self._reconfigurePort() - - def getByteSize(self): - """Get the current byte size setting.""" - return self._bytesize - - bytesize = property(getByteSize, setByteSize, doc="Byte size setting") - - - def setParity(self, parity): - """Change parity setting.""" - if parity not in self.PARITIES: raise ValueError("Not a valid parity: %r" % (parity,)) - self._parity = parity - if self._isOpen: self._reconfigurePort() - - def getParity(self): - """Get the current parity setting.""" - return self._parity - - parity = property(getParity, setParity, doc="Parity setting") - - - def setStopbits(self, stopbits): - """Change stopbits size.""" - if stopbits not in self.STOPBITS: raise ValueError("Not a valid stopbit size: %r" % (stopbits,)) - self._stopbits = stopbits - if self._isOpen: self._reconfigurePort() - - def getStopbits(self): - """Get the current stopbits setting.""" - return self._stopbits - - stopbits = property(getStopbits, setStopbits, doc="Stopbits setting") - - - def setTimeout(self, timeout): - """Change timeout setting.""" - if timeout is not None: - if timeout < 0: raise ValueError("Not a valid timeout: %r" % (timeout,)) - try: - timeout + 1 #test if it's a number, will throw a TypeError if not... - except TypeError: - raise ValueError("Not a valid timeout: %r" % (timeout,)) - - self._timeout = timeout - if self._isOpen: self._reconfigurePort() - - def getTimeout(self): - """Get the current timeout setting.""" - return self._timeout - - timeout = property(getTimeout, setTimeout, doc="Timeout setting for read()") - - - def setWriteTimeout(self, timeout): - """Change timeout setting.""" - if timeout is not None: - if timeout < 0: raise ValueError("Not a valid timeout: %r" % (timeout,)) - try: - timeout + 1 #test if it's a number, will throw a TypeError if not... 
- except TypeError: - raise ValueError("Not a valid timeout: %r" % timeout) - - self._writeTimeout = timeout - if self._isOpen: self._reconfigurePort() - - def getWriteTimeout(self): - """Get the current timeout setting.""" - return self._writeTimeout - - writeTimeout = property(getWriteTimeout, setWriteTimeout, doc="Timeout setting for write()") - - - def setXonXoff(self, xonxoff): - """Change XonXoff setting.""" - self._xonxoff = xonxoff - if self._isOpen: self._reconfigurePort() - - def getXonXoff(self): - """Get the current XonXoff setting.""" - return self._xonxoff - - xonxoff = property(getXonXoff, setXonXoff, doc="Xon/Xoff setting") - - def setRtsCts(self, rtscts): - """Change RtsCts flow control setting.""" - self._rtscts = rtscts - if self._isOpen: self._reconfigurePort() - - def getRtsCts(self): - """Get the current RtsCts flow control setting.""" - return self._rtscts - - rtscts = property(getRtsCts, setRtsCts, doc="RTS/CTS flow control setting") - - def setDsrDtr(self, dsrdtr=None): - """Change DsrDtr flow control setting.""" - if dsrdtr is None: - #if not set, keep backwards compatibility and follow rtscts setting - self._dsrdtr = self._rtscts - else: - #if defined independently, follow its value - self._dsrdtr = dsrdtr - if self._isOpen: self._reconfigurePort() - - def getDsrDtr(self): - """Get the current DsrDtr flow control setting.""" - return self._dsrdtr - - dsrdtr = property(getDsrDtr, setDsrDtr, "DSR/DTR flow control setting") - - def setInterCharTimeout(self, interCharTimeout): - """Change inter-character timeout setting.""" - if interCharTimeout is not None: - if interCharTimeout < 0: raise ValueError("Not a valid timeout: %r" % interCharTimeout) - try: - interCharTimeout + 1 #test if it's a number, will throw a TypeError if not... - except TypeError: - raise ValueError("Not a valid timeout: %r" % interCharTimeout) - - self._interCharTimeout = interCharTimeout - if self._isOpen: self._reconfigurePort() - - def getInterCharTimeout(self): - """Get the current inter-character timeout setting.""" - return self._interCharTimeout - - interCharTimeout = property(getInterCharTimeout, setInterCharTimeout, doc="Inter-character timeout setting for read()") - - - # - - - - - - - - - - - - - - - - - - - - - - - - - - def __repr__(self): - """String representation of the current port settings and its state.""" - return "%s(port=%r, baudrate=%r, bytesize=%r, parity=%r, stopbits=%r, timeout=%r, xonxoff=%r, rtscts=%r, dsrdtr=%r)" % ( - self.__class__.__name__, - id(self), - self._isOpen, - self.portstr, - self.baudrate, - self.bytesize, - self.parity, - self.stopbits, - self.timeout, - self.xonxoff, - self.rtscts, - self.dsrdtr, - ) - -if __name__ == '__main__': - s = SerialBase() - print s.portstr - print s.getSupportedBaudrates() - print s.getSupportedByteSizes() - print s.getSupportedParities() - print s.getSupportedStopbits() - print s diff --git a/icarus-miner/data/usr/lib/python2.6/site-packages/serial/serialutil.pyc b/icarus-miner/data/usr/lib/python2.6/site-packages/serial/serialutil.pyc deleted file mode 100644 index 87df042..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/site-packages/serial/serialutil.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/site-packages/serial/serialwin32.py b/icarus-miner/data/usr/lib/python2.6/site-packages/serial/serialwin32.py deleted file mode 100644 index f5e8961..0000000 --- a/icarus-miner/data/usr/lib/python2.6/site-packages/serial/serialwin32.py +++ /dev/null @@ -1,336 +0,0 @@ -#! 
python -# Python Serial Port Extension for Win32, Linux, BSD, Jython -# serial driver for win32 -# see __init__.py -# -# (C) 2001-2008 Chris Liechti -# this is distributed under a free software license, see license.txt - -import win32file # The base COM port and file IO functions. -import win32event # We use events and the WaitFor[Single|Multiple]Objects functions. -import win32con # constants. -from serialutil import * - -#from winbase.h. these should realy be in win32con -MS_CTS_ON = 16 -MS_DSR_ON = 32 -MS_RING_ON = 64 -MS_RLSD_ON = 128 - -def device(portnum): - """Turn a port number into a device name""" - return 'COM%d' % (portnum+1) #numbers are transformed to a string - -class Serial(SerialBase): - """Serial port implemenation for Win32. This implemenatation requires a - win32all installation.""" - - BAUDRATES = (50,75,110,134,150,200,300,600,1200,1800,2400,4800,9600, - 19200,38400,57600,115200) - - def open(self): - """Open port with current settings. This may throw a SerialException - if the port cannot be opened.""" - if self._port is None: - raise SerialException("Port must be configured before it can be used.") - self.hComPort = None - # the "\\.\COMx" format is required for devices other than COM1-COM8 - # not all versions of windows seem to support this properly - # so that the first few ports are used with the DOS device name - port = self.portstr - if port.upper().startswith('COM') and int(port[3:]) > 8: - port = '\\\\.\\' + port - try: - self.hComPort = win32file.CreateFile(port, - win32con.GENERIC_READ | win32con.GENERIC_WRITE, - 0, # exclusive access - None, # no security - win32con.OPEN_EXISTING, - win32con.FILE_FLAG_OVERLAPPED, - None) - except Exception, msg: - self.hComPort = None #'cause __del__ is called anyway - raise SerialException("could not open port %s: %s" % (self.portstr, msg)) - # Setup a 4k buffer - win32file.SetupComm(self.hComPort, 4096, 4096) - - #Save original timeout values: - self._orgTimeouts = win32file.GetCommTimeouts(self.hComPort) - - self._rtsState = win32file.RTS_CONTROL_ENABLE - self._dtrState = win32file.DTR_CONTROL_ENABLE - - self._reconfigurePort() - - # Clear buffers: - # Remove anything that was there - win32file.PurgeComm(self.hComPort, - win32file.PURGE_TXCLEAR | win32file.PURGE_TXABORT | - win32file.PURGE_RXCLEAR | win32file.PURGE_RXABORT) - - self._overlappedRead = win32file.OVERLAPPED() - self._overlappedRead.hEvent = win32event.CreateEvent(None, 1, 0, None) - self._overlappedWrite = win32file.OVERLAPPED() - #~ self._overlappedWrite.hEvent = win32event.CreateEvent(None, 1, 0, None) - self._overlappedWrite.hEvent = win32event.CreateEvent(None, 0, 0, None) - self._isOpen = True - - def _reconfigurePort(self): - """Set communication parameters on opened port.""" - if not self.hComPort: - raise SerialException("Can only operate on a valid port handle") - - #Set Windows timeout values - #timeouts is a tuple with the following items: - #(ReadIntervalTimeout,ReadTotalTimeoutMultiplier, - # ReadTotalTimeoutConstant,WriteTotalTimeoutMultiplier, - # WriteTotalTimeoutConstant) - if self._timeout is None: - timeouts = (0, 0, 0, 0, 0) - elif self._timeout == 0: - timeouts = (win32con.MAXDWORD, 0, 0, 0, 0) - else: - timeouts = (0, 0, int(self._timeout*1000), 0, 0) - if self._timeout != 0 and self._interCharTimeout is not None: - timeouts = (int(self._interCharTimeout * 1000),) + timeouts[1:] - - if self._writeTimeout is None: - pass - elif self._writeTimeout == 0: - timeouts = timeouts[:-2] + (0, win32con.MAXDWORD) - else: - timeouts = 
timeouts[:-2] + (0, int(self._writeTimeout*1000)) - win32file.SetCommTimeouts(self.hComPort, timeouts) - - win32file.SetCommMask(self.hComPort, win32file.EV_ERR) - - # Setup the connection info. - # Get state and modify it: - comDCB = win32file.GetCommState(self.hComPort) - comDCB.BaudRate = self._baudrate - - if self._bytesize == FIVEBITS: - comDCB.ByteSize = 5 - elif self._bytesize == SIXBITS: - comDCB.ByteSize = 6 - elif self._bytesize == SEVENBITS: - comDCB.ByteSize = 7 - elif self._bytesize == EIGHTBITS: - comDCB.ByteSize = 8 - else: - raise ValueError("Unsupported number of data bits: %r" % self._bytesize) - - if self._parity == PARITY_NONE: - comDCB.Parity = win32file.NOPARITY - comDCB.fParity = 0 # Dis/Enable Parity Check - elif self._parity == PARITY_EVEN: - comDCB.Parity = win32file.EVENPARITY - comDCB.fParity = 1 # Dis/Enable Parity Check - elif self._parity == PARITY_ODD: - comDCB.Parity = win32file.ODDPARITY - comDCB.fParity = 1 # Dis/Enable Parity Check - elif self._parity == PARITY_MARK: - comDCB.Parity = win32file.MARKPARITY - comDCB.fParity = 1 # Dis/Enable Parity Check - elif self._parity == PARITY_SPACE: - comDCB.Parity = win32file.SPACEPARITY - comDCB.fParity = 1 # Dis/Enable Parity Check - else: - raise ValueError("Unsupported parity mode: %r" % self._parity) - - if self._stopbits == STOPBITS_ONE: - comDCB.StopBits = win32file.ONESTOPBIT - elif self._stopbits == STOPBITS_TWO: - comDCB.StopBits = win32file.TWOSTOPBITS - else: - raise ValueError("Unsupported number of stop bits: %r" % self._stopbits) - - comDCB.fBinary = 1 # Enable Binary Transmission - # Char. w/ Parity-Err are replaced with 0xff (if fErrorChar is set to TRUE) - if self._rtscts: - comDCB.fRtsControl = win32file.RTS_CONTROL_HANDSHAKE - else: - comDCB.fRtsControl = self._rtsState - if self._dsrdtr: - comDCB.fDtrControl = win32file.DTR_CONTROL_HANDSHAKE - else: - comDCB.fDtrControl = self._dtrState - comDCB.fOutxCtsFlow = self._rtscts - comDCB.fOutxDsrFlow = self._dsrdtr - comDCB.fOutX = self._xonxoff - comDCB.fInX = self._xonxoff - comDCB.fNull = 0 - comDCB.fErrorChar = 0 - comDCB.fAbortOnError = 0 - comDCB.XonChar = XON - comDCB.XoffChar = XOFF - - try: - win32file.SetCommState(self.hComPort, comDCB) - except win32file.error, e: - raise ValueError("Cannot configure port, some setting was wrong. Original message: %s" % e) - - #~ def __del__(self): - #~ self.close() - - def close(self): - """Close port""" - if self._isOpen: - if self.hComPort: - try: - # Restore original timeout values: - win32file.SetCommTimeouts(self.hComPort, self._orgTimeouts) - except win32file.error: - # ignore errors. can happen for unplugged USB serial devices - pass - # Close COM-Port: - win32file.CloseHandle(self.hComPort) - win32file.CloseHandle(self._overlappedRead.hEvent) - win32file.CloseHandle(self._overlappedWrite.hEvent) - self.hComPort = None - self._isOpen = False - - def makeDeviceName(self, port): - return device(port) - - # - - - - - - - - - - - - - - - - - - - - - - - - - - def inWaiting(self): - """Return the number of characters currently in the input buffer.""" - flags, comstat = win32file.ClearCommError(self.hComPort) - return comstat.cbInQue - - def read(self, size=1): - """Read size bytes from the serial port. If a timeout is set it may - return less characters as requested. 
With no timeout it will block - until the requested number of bytes is read.""" - if not self.hComPort: raise portNotOpenError - if size > 0: - win32event.ResetEvent(self._overlappedRead.hEvent) - flags, comstat = win32file.ClearCommError(self.hComPort) - if self.timeout == 0: - n = min(comstat.cbInQue, size) - if n > 0: - rc, buf = win32file.ReadFile(self.hComPort, win32file.AllocateReadBuffer(n), self._overlappedRead) - win32event.WaitForSingleObject(self._overlappedRead.hEvent, win32event.INFINITE) - read = str(buf) - else: - read = '' - else: - rc, buf = win32file.ReadFile(self.hComPort, win32file.AllocateReadBuffer(size), self._overlappedRead) - n = win32file.GetOverlappedResult(self.hComPort, self._overlappedRead, 1) - read = str(buf[:n]) - else: - read = '' - return read - - def write(self, data): - """Output the given string over the serial port.""" - if not self.hComPort: raise portNotOpenError - if not isinstance(data, str): - raise TypeError('expected str, got %s' % type(data)) - #print repr(s), - if data: - #~ win32event.ResetEvent(self._overlappedWrite.hEvent) - err, n = win32file.WriteFile(self.hComPort, data, self._overlappedWrite) - if err: #will be ERROR_IO_PENDING: - # Wait for the write to complete. - #~ win32event.WaitForSingleObject(self._overlappedWrite.hEvent, win32event.INFINITE) - n = win32file.GetOverlappedResult(self.hComPort, self._overlappedWrite, 1) - if n != len(data): - raise writeTimeoutError - - - def flushInput(self): - """Clear input buffer, discarding all that is in the buffer.""" - if not self.hComPort: raise portNotOpenError - win32file.PurgeComm(self.hComPort, win32file.PURGE_RXCLEAR | win32file.PURGE_RXABORT) - - def flushOutput(self): - """Clear output buffer, aborting the current output and - discarding all that is in the buffer.""" - if not self.hComPort: raise portNotOpenError - win32file.PurgeComm(self.hComPort, win32file.PURGE_TXCLEAR | win32file.PURGE_TXABORT) - - def sendBreak(self, duration=0.25): - """Send break condition. Timed, returns to idle state after given duration.""" - if not self.hComPort: raise portNotOpenError - import time - win32file.SetCommBreak(self.hComPort) - time.sleep(duration) - win32file.ClearCommBreak(self.hComPort) - - def setBreak(self, level=1): - """Set break: Controls TXD. 
When active, no transmitting is possible.""" - if not self.hComPort: raise portNotOpenError - if level: - win32file.SetCommBreak(self.hComPort) - else: - win32file.ClearCommBreak(self.hComPort) - - def setRTS(self, level=1): - """Set terminal status line: Request To Send""" - if not self.hComPort: raise portNotOpenError - if level: - self._rtsState = win32file.RTS_CONTROL_ENABLE - win32file.EscapeCommFunction(self.hComPort, win32file.SETRTS) - else: - self._rtsState = win32file.RTS_CONTROL_DISABLE - win32file.EscapeCommFunction(self.hComPort, win32file.CLRRTS) - - def setDTR(self, level=1): - """Set terminal status line: Data Terminal Ready""" - if not self.hComPort: raise portNotOpenError - if level: - self._dtrState = win32file.DTR_CONTROL_ENABLE - win32file.EscapeCommFunction(self.hComPort, win32file.SETDTR) - else: - self._dtrState = win32file.DTR_CONTROL_DISABLE - win32file.EscapeCommFunction(self.hComPort, win32file.CLRDTR) - - def getCTS(self): - """Read terminal status line: Clear To Send""" - if not self.hComPort: raise portNotOpenError - return MS_CTS_ON & win32file.GetCommModemStatus(self.hComPort) != 0 - - def getDSR(self): - """Read terminal status line: Data Set Ready""" - if not self.hComPort: raise portNotOpenError - return MS_DSR_ON & win32file.GetCommModemStatus(self.hComPort) != 0 - - def getRI(self): - """Read terminal status line: Ring Indicator""" - if not self.hComPort: raise portNotOpenError - return MS_RING_ON & win32file.GetCommModemStatus(self.hComPort) != 0 - - def getCD(self): - """Read terminal status line: Carrier Detect""" - if not self.hComPort: raise portNotOpenError - return MS_RLSD_ON & win32file.GetCommModemStatus(self.hComPort) != 0 - - # - - platform specific - - - - - - def setXON(self, level=True): - """Platform specific - set flow state.""" - if not self.hComPort: raise portNotOpenError - if level: - win32file.EscapeCommFunction(self.hComPort, win32file.SETXON) - else: - win32file.EscapeCommFunction(self.hComPort, win32file.SETXOFF) - -# Test function only!! -if __name__ == '__main__': - s = Serial(0) - print s - - s = Serial() - print s - - - s.baudrate = 19200 - s.databits = 7 - s.close() - s.port = 0 - s.open() - print s - diff --git a/icarus-miner/data/usr/lib/python2.6/site-packages/serial/sermsdos.py b/icarus-miner/data/usr/lib/python2.6/site-packages/serial/sermsdos.py deleted file mode 100644 index a516118..0000000 --- a/icarus-miner/data/usr/lib/python2.6/site-packages/serial/sermsdos.py +++ /dev/null @@ -1,215 +0,0 @@ -# sermsdos.py -# -# History: -# -# 3rd September 2002 Dave Haynes -# 1. First defined -# -# Although this code should run under the latest versions of -# Python, on DOS-based platforms such as Windows 95 and 98, -# it has been specifically written to be compatible with -# PyDOS, available at: -# http://www.python.org/ftp/python/wpy/dos.html -# -# PyDOS is a stripped-down version of Python 1.5.2 for -# DOS machines. Therefore, in making changes to this file, -# please respect Python 1.5.2 syntax. In addition, please -# limit the width of this file to 60 characters. 
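For orientation, a minimal usage sketch of the Win32 driver deleted above (port number and payload are hypothetical; assumes pyserial 2.x semantics, where Serial(0) opens the first COM port):

from serial import Serial

s = Serial(0)            # hypothetical: open the first COM port
s.setDTR(1)              # assert Data Terminal Ready via EscapeCommFunction
s.setRTS(0)              # drop Request To Send
if s.getCTS():           # modem status read through GetCommModemStatus
    s.write("ping")      # Python 2 byte string, as in the driver above
s.sendBreak(0.25)        # hold TXD in break state for 250 ms
s.close()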
-# -# Note also that the modules in PyDOS contain fewer members -# than other versions, so we are restricted to using the -# following: -# -# In module os: -# ------------- -# environ, chdir, getcwd, getpid, umask, fdopen, close, -# dup, dup2, fstat, lseek, open, read, write, O_RDONLY, -# O_WRONLY, O_RDWR, O_APPEND, O_CREAT, O_EXCL, O_TRUNC, -# access, F_OK, R_OK, W_OK, X_OK, chmod, listdir, mkdir, -# remove, rename, renames, rmdir, stat, unlink, utime, -# execl, execle, execlp, execlpe, execvp, execvpe, _exit, -# system. -# -# In module os.path: -# ------------------ -# curdir, pardir, sep, altsep, pathsep, defpath, linesep. -# - -import os -import sys -import string -import serialutil - -BAUD_RATES = { - 110: "11", - 150: "15", - 300: "30", - 600: "60", - 1200: "12", - 2400: "24", - 4800: "48", - 9600: "96", - 19200: "19"} - -(PARITY_NONE, PARITY_EVEN, PARITY_ODD, PARITY_MARK, -PARITY_SPACE) = range(5) -(STOPBITS_ONE, STOPBITS_ONEANDAHALF, -STOPBITS_TWO) = (1, 1.5, 2) -FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS = (5,6,7,8) -(RETURN_ERROR, RETURN_BUSY, RETURN_RETRY, RETURN_READY, -RETURN_NONE) = ('E', 'B', 'P', 'R', 'N') -portNotOpenError = ValueError('port not open') - -def device(portnum): - return 'COM%d' % (portnum+1) - -class Serial(serialutil.FileLike): - """ - port: number of device; numbering starts at - zero. if everything fails, the user can - specify a device string, note that this - isn't portable any more - baudrate: baud rate - bytesize: number of databits - parity: enable parity checking - stopbits: number of stopbits - timeout: set a timeout (None for waiting forever) - xonxoff: enable software flow control - rtscts: enable RTS/CTS flow control - retry: DOS retry mode - """ - def __init__(self, - port, - baudrate = 9600, - bytesize = EIGHTBITS, - parity = PARITY_NONE, - stopbits = STOPBITS_ONE, - timeout = None, - xonxoff = 0, - rtscts = 0, - retry = RETURN_RETRY - ): - - if type(port) == type(''): - #strings are taken directly - self.portstr = port - else: - #numbers are transformed to a string - self.portstr = device(port+1) - - self.baud = BAUD_RATES[baudrate] - self.bytesize = str(bytesize) - - if parity == PARITY_NONE: - self.parity = 'N' - elif parity == PARITY_EVEN: - self.parity = 'E' - elif parity == PARITY_ODD: - self.parity = 'O' - elif parity == PARITY_MARK: - self.parity = 'M' - elif parity == PARITY_SPACE: - self.parity = 'S' - - self.stop = str(stopbits) - self.retry = retry - self.filename = "sermsdos.tmp" - - self._config(self.portstr, self.baud, self.parity, - self.bytesize, self.stop, self.retry, self.filename) - - def __del__(self): - self.close() - - def close(self): - pass - - def _config(self, port, baud, parity, data, stop, retry, - filename): - comString = string.join(("MODE ", port, ":" - , " BAUD= ", baud, " PARITY= ", parity - , " DATA= ", data, " STOP= ", stop, " RETRY= ", - retry, " > ", filename ), '') - os.system(comString) - - def setBaudrate(self, baudrate): - self._config(self.portstr, BAUD_RATES[baudrate], - self.parity, self.bytesize, self.stop, self.retry, - self.filename) - - def inWaiting(self): - """returns the number of bytes waiting to be read""" - raise NotImplementedError - - def read(self, num = 1): - """Read num bytes from serial port""" - handle = os.open(self.portstr, - os.O_RDONLY | os.O_BINARY) - # print os.fstat(handle) - rv = os.read(handle, num) - os.close(handle) - return rv - - def write(self, s): - """Write string to serial port""" - handle = os.open(self.portstr, - os.O_WRONLY | os.O_BINARY) - rv = os.write(handle, s) 
- os.close(handle) - return rv - - def flushInput(self): - raise NotImplementedError - - def flushOutput(self): - raise NotImplementedError - - def sendBreak(self): - raise NotImplementedError - - def setRTS(self,level=1): - """Set terminal status line""" - raise NotImplementedError - - def setDTR(self,level=1): - """Set terminal status line""" - raise NotImplementedError - - def getCTS(self): - """Read terminal status line""" - raise NotImplementedError - - def getDSR(self): - """Read terminal status line""" - raise NotImplementedError - - def getRI(self): - """Read terminal status line""" - raise NotImplementedError - - def getCD(self): - """Read terminal status line""" - raise NotImplementedError - - def __repr__(self): - return string.join(( ": ", self.portstr - , self.baud, self.parity, self.bytesize, self.stop, - self.retry , self.filename), ' ') - -if __name__ == '__main__': - print __name__ - s = Serial(0) - print s - - - - - - - - - - - - - diff --git a/icarus-miner/data/usr/lib/python2.6/site.py b/icarus-miner/data/usr/lib/python2.6/site.py deleted file mode 100644 index 21c7db2..0000000 --- a/icarus-miner/data/usr/lib/python2.6/site.py +++ /dev/null @@ -1,565 +0,0 @@ -"""Append module search paths for third-party packages to sys.path. - -**************************************************************** -* This module is automatically imported during initialization. * -**************************************************************** - -In earlier versions of Python (up to 1.5a3), scripts or modules that -needed to use site-specific modules would place ``import site'' -somewhere near the top of their code. Because of the automatic -import, this is no longer necessary (but code that does it still -works). - -This will append site-specific paths to the module search path. On -Unix (including Mac OSX), it starts with sys.prefix and -sys.exec_prefix (if different) and appends -lib/python<version>/site-packages as well as lib/site-python. -On other platforms (such as Windows), it tries each of the -prefixes directly, as well as with lib/site-packages appended. The -resulting directories, if they exist, are appended to sys.path, and -also inspected for path configuration files. - -A path configuration file is a file whose name has the form -<package>.pth; its contents are additional directories (one per line) -to be added to sys.path. Non-existing directories (or -non-directories) are never added to sys.path; no directory is added to -sys.path more than once. Blank lines and lines beginning with -'#' are skipped. Lines starting with 'import' are executed. - -For example, suppose sys.prefix and sys.exec_prefix are set to -/usr/local and there is a directory /usr/local/lib/python2.5/site-packages -with three subdirectories, foo, bar and spam, and two path -configuration files, foo.pth and bar.pth. Assume foo.pth contains the -following: - - # foo package configuration - foo - bar - bletch - -and bar.pth contains: - - # bar package configuration - bar - -Then the following directories are added to sys.path, in this order: - - /usr/local/lib/python2.5/site-packages/bar - /usr/local/lib/python2.5/site-packages/foo - -Note that bletch is omitted because it doesn't exist; bar precedes foo -because bar.pth comes alphabetically before foo.pth; and spam is -omitted because it is not mentioned in either path configuration file. - -After these path manipulations, an attempt is made to import a module -named sitecustomize, which can perform arbitrary additional -site-specific customizations. 
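As a hedged illustration of that hook: site.py attempts `import sitecustomize` at startup, so a file like the following anywhere on sys.path runs automatically (the added directory is hypothetical):

# sitecustomize.py -- the startup hook described above
import sys
sys.path.append('/opt/vendor/lib/python')  # hypothetical extra search path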
If this import fails with an -ImportError exception, it is silently ignored. - -""" - -import sys -import os -import __builtin__ - -# Prefixes for site-packages; add additional prefixes like /usr/local here -PREFIXES = [sys.prefix, sys.exec_prefix] -# Enable per user site-packages directory -# set it to False to disable the feature or True to force the feature -ENABLE_USER_SITE = None -# for distutils.commands.install -USER_SITE = None -USER_BASE = None - - -def makepath(*paths): - dir = os.path.abspath(os.path.join(*paths)) - return dir, os.path.normcase(dir) - - -def abs__file__(): - """Set all modules' __file__ attributes to absolute paths""" - for m in sys.modules.values(): - if hasattr(m, '__loader__'): - continue # don't mess with a PEP 302-supplied __file__ - try: - m.__file__ = os.path.abspath(m.__file__) - except AttributeError: - continue - - -def removeduppaths(): - """ Remove duplicate entries from sys.path along with making them - absolute""" - # This ensures that the initial path provided by the interpreter contains - # only absolute pathnames, even if we're running from the build directory. - L = [] - known_paths = set() - for dir in sys.path: - # Filter out duplicate paths (on case-insensitive file systems also - # if they only differ in case); turn relative paths into absolute - # paths. - dir, dircase = makepath(dir) - if not dircase in known_paths: - L.append(dir) - known_paths.add(dircase) - sys.path[:] = L - return known_paths - -# XXX This should not be part of site.py, since it is needed even when -# using the -S option for Python. See http://www.python.org/sf/586680 -def addbuilddir(): - """Append ./build/lib.<platform> in case we're running in the build dir - (especially for Guido :-)""" - from distutils.util import get_platform - s = "build/lib.%s-%.3s" % (get_platform(), sys.version) - if hasattr(sys, 'gettotalrefcount'): - s += '-pydebug' - s = os.path.join(os.path.dirname(sys.path[-1]), s) - sys.path.append(s) - - -def _init_pathinfo(): - """Return a set containing all existing directory entries from sys.path""" - d = set() - for dir in sys.path: - try: - if os.path.isdir(dir): - dir, dircase = makepath(dir) - d.add(dircase) - except TypeError: - continue - return d - - -def addpackage(sitedir, name, known_paths): - """Process a .pth file within the site-packages directory: - For each line in the file, either combine it with sitedir to a path - and add that to known_paths, or execute it if it starts with 'import '. 
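For instance, a path configuration file exercising both behaviors could look like this (all names illustrative):

# example.pth -- plain lines are appended to sys.path;
# lines starting with 'import ' are executed verbatim
foo
bar
import os; os.environ.setdefault('EXAMPLE_FLAG', '1')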
- """ - if known_paths is None: - _init_pathinfo() - reset = 1 - else: - reset = 0 - fullname = os.path.join(sitedir, name) - try: - f = open(fullname, "rU") - except IOError: - return - with f: - for line in f: - if line.startswith("#"): - continue - if line.startswith(("import ", "import\t")): - exec line - continue - line = line.rstrip() - dir, dircase = makepath(sitedir, line) - if not dircase in known_paths and os.path.exists(dir): - sys.path.append(dir) - known_paths.add(dircase) - if reset: - known_paths = None - return known_paths - - -def addsitedir(sitedir, known_paths=None): - """Add 'sitedir' argument to sys.path if missing and handle .pth files in - 'sitedir'""" - if known_paths is None: - known_paths = _init_pathinfo() - reset = 1 - else: - reset = 0 - sitedir, sitedircase = makepath(sitedir) - if not sitedircase in known_paths: - sys.path.append(sitedir) # Add path component - try: - names = os.listdir(sitedir) - except os.error: - return - dotpth = os.extsep + "pth" - names = [name for name in names if name.endswith(dotpth)] - for name in sorted(names): - addpackage(sitedir, name, known_paths) - if reset: - known_paths = None - return known_paths - - -def check_enableusersite(): - """Check if user site directory is safe for inclusion - - The function tests for the command line flag (including environment var), - process uid/gid equal to effective uid/gid. - - None: Disabled for security reasons - False: Disabled by user (command line option) - True: Safe and enabled - """ - if sys.flags.no_user_site: - return False - - if hasattr(os, "getuid") and hasattr(os, "geteuid"): - # check process uid == effective uid - if os.geteuid() != os.getuid(): - return None - if hasattr(os, "getgid") and hasattr(os, "getegid"): - # check process gid == effective gid - if os.getegid() != os.getgid(): - return None - - return True - - -def addusersitepackages(known_paths): - """Add a per user site-package to sys.path - - Each user has its own python directory with site-packages in the - home directory. - - USER_BASE is the root directory for all Python versions - - USER_SITE is the user specific site-packages directory - - USER_SITE/.. can be used for data. 
- """ - global USER_BASE, USER_SITE, ENABLE_USER_SITE - env_base = os.environ.get("PYTHONUSERBASE", None) - - def joinuser(*args): - return os.path.expanduser(os.path.join(*args)) - - #if sys.platform in ('os2emx', 'riscos'): - # # Don't know what to put here - # USER_BASE = '' - # USER_SITE = '' - if os.name == "nt": - base = os.environ.get("APPDATA") or "~" - USER_BASE = env_base if env_base else joinuser(base, "Python") - USER_SITE = os.path.join(USER_BASE, - "Python" + sys.version[0] + sys.version[2], - "site-packages") - else: - USER_BASE = env_base if env_base else joinuser("~", ".local") - USER_SITE = os.path.join(USER_BASE, "lib", - "python" + sys.version[:3], - "site-packages") - - if ENABLE_USER_SITE and os.path.isdir(USER_SITE): - addsitedir(USER_SITE, known_paths) - return known_paths - - -def addsitepackages(known_paths): - """Add site-packages (and possibly site-python) to sys.path""" - sitedirs = [] - seen = [] - - for prefix in PREFIXES: - if not prefix or prefix in seen: - continue - seen.append(prefix) - - if sys.platform in ('os2emx', 'riscos'): - sitedirs.append(os.path.join(prefix, "Lib", "site-packages")) - elif os.sep == '/': - sitedirs.append(os.path.join(prefix, "lib", - "python" + sys.version[:3], - "site-packages")) - sitedirs.append(os.path.join(prefix, "lib", "site-python")) - else: - sitedirs.append(prefix) - sitedirs.append(os.path.join(prefix, "lib", "site-packages")) - - if sys.platform == "darwin": - # for framework builds *only* we add the standard Apple - # locations. Currently only per-user, but /Library and - # /Network/Library could be added too - if 'Python.framework' in prefix: - sitedirs.append( - os.path.expanduser( - os.path.join("~", "Library", "Python", - sys.version[:3], "site-packages"))) - - for sitedir in sitedirs: - if os.path.isdir(sitedir): - addsitedir(sitedir, known_paths) - - return known_paths - - -def setBEGINLIBPATH(): - """The OS/2 EMX port has optional extension modules that do double duty - as DLLs (and must use the .DLL file extension) for other extensions. - The library search path needs to be amended so these will be found - during module import. Use BEGINLIBPATH so that these are at the start - of the library search path. - - """ - dllpath = os.path.join(sys.prefix, "Lib", "lib-dynload") - libpath = os.environ['BEGINLIBPATH'].split(';') - if libpath[-1]: - libpath.append(dllpath) - else: - libpath[-1] = dllpath - os.environ['BEGINLIBPATH'] = ';'.join(libpath) - - -def setquit(): - """Define new built-ins 'quit' and 'exit'. - These are simply strings that display a hint on how to exit. - - """ - if os.sep == ':': - eof = 'Cmd-Q' - elif os.sep == '\\': - eof = 'Ctrl-Z plus Return' - else: - eof = 'Ctrl-D (i.e. EOF)' - - class Quitter(object): - def __init__(self, name): - self.name = name - def __repr__(self): - return 'Use %s() or %s to exit' % (self.name, eof) - def __call__(self, code=None): - # Shells like IDLE catch the SystemExit, but listen when their - # stdin wrapper is closed. 
- try: - sys.stdin.close() - except: - pass - raise SystemExit(code) - __builtin__.quit = Quitter('quit') - __builtin__.exit = Quitter('exit') - - -class _Printer(object): - """interactive prompt objects for printing the license text, a list of - contributors and the copyright notice.""" - - MAXLINES = 23 - - def __init__(self, name, data, files=(), dirs=()): - self.__name = name - self.__data = data - self.__files = files - self.__dirs = dirs - self.__lines = None - - def __setup(self): - if self.__lines: - return - data = None - for dir in self.__dirs: - for filename in self.__files: - filename = os.path.join(dir, filename) - try: - fp = file(filename, "rU") - data = fp.read() - fp.close() - break - except IOError: - pass - if data: - break - if not data: - data = self.__data - self.__lines = data.split('\n') - self.__linecnt = len(self.__lines) - - def __repr__(self): - self.__setup() - if len(self.__lines) <= self.MAXLINES: - return "\n".join(self.__lines) - else: - return "Type %s() to see the full %s text" % ((self.__name,)*2) - - def __call__(self): - self.__setup() - prompt = 'Hit Return for more, or q (and Return) to quit: ' - lineno = 0 - while 1: - try: - for i in range(lineno, lineno + self.MAXLINES): - print self.__lines[i] - except IndexError: - break - else: - lineno += self.MAXLINES - key = None - while key is None: - key = raw_input(prompt) - if key not in ('', 'q'): - key = None - if key == 'q': - break - -def setcopyright(): - """Set 'copyright' and 'credits' in __builtin__""" - __builtin__.copyright = _Printer("copyright", sys.copyright) - if sys.platform[:4] == 'java': - __builtin__.credits = _Printer( - "credits", - "Jython is maintained by the Jython developers (www.jython.org).") - else: - __builtin__.credits = _Printer("credits", """\ - Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands - for supporting Python development. See www.python.org for more information.""") - here = os.path.dirname(os.__file__) - __builtin__.license = _Printer( - "license", "See http://www.python.org/%.3s/license.html" % sys.version, - ["LICENSE.txt", "LICENSE"], - [os.path.join(here, os.pardir), here, os.curdir]) - - -class _Helper(object): - """Define the built-in 'help'. - This is a wrapper around pydoc.help (with a twist). - - """ - - def __repr__(self): - return "Type help() for interactive help, " \ - "or help(object) for help about object." - def __call__(self, *args, **kwds): - import pydoc - return pydoc.help(*args, **kwds) - -def sethelper(): - __builtin__.help = _Helper() - -def aliasmbcs(): - """On Windows, some default encodings are not provided by Python, - while they are always available as "mbcs" in each locale. Make - them usable by aliasing to "mbcs" in such a case.""" - if sys.platform == 'win32': - import locale, codecs - enc = locale.getdefaultlocale()[1] - if enc.startswith('cp'): # "cp***" ? - try: - codecs.lookup(enc) - except LookupError: - import encodings - encodings._cache[enc] = encodings._unknown - encodings.aliases.aliases[enc] = 'mbcs' - -def setencoding(): - """Set the string encoding used by the Unicode implementation. The - default is 'ascii', but if you're willing to experiment, you can - change this.""" - encoding = "ascii" # Default value set by _PyUnicode_Init() - if 0: - # Enable to support locale aware default string encodings. - import locale - loc = locale.getdefaultlocale() - if loc[1]: - encoding = loc[1] - if 0: - # Enable to switch off string to Unicode coercion and implicit - # Unicode to string conversion. 
- encoding = "undefined" - if encoding != "ascii": - # On Non-Unicode builds this will raise an AttributeError... - sys.setdefaultencoding(encoding) # Needs Python Unicode build ! - - -def execsitecustomize(): - """Run custom site specific code, if available.""" - try: - import sitecustomize - except ImportError: - pass - - -def execusercustomize(): - """Run custom user specific code, if available.""" - try: - import usercustomize - except ImportError: - pass - - -def main(): - global ENABLE_USER_SITE - - abs__file__() - known_paths = removeduppaths() - if (os.name == "posix" and sys.path and - os.path.basename(sys.path[-1]) == "Modules"): - addbuilddir() - if ENABLE_USER_SITE is None: - ENABLE_USER_SITE = check_enableusersite() - known_paths = addusersitepackages(known_paths) - known_paths = addsitepackages(known_paths) - if sys.platform == 'os2emx': - setBEGINLIBPATH() - setquit() - setcopyright() - sethelper() - aliasmbcs() - setencoding() - execsitecustomize() - if ENABLE_USER_SITE: - execusercustomize() - # Remove sys.setdefaultencoding() so that users cannot change the - # encoding after initialization. The test for presence is needed when - # this module is run as a script, because this code is executed twice. - if hasattr(sys, "setdefaultencoding"): - del sys.setdefaultencoding - -main() - -def _script(): - help = """\ - %s [--user-base] [--user-site] - - Without arguments print some useful information - With arguments print the value of USER_BASE and/or USER_SITE separated - by '%s'. - - Exit codes with --user-base or --user-site: - 0 - user site directory is enabled - 1 - user site directory is disabled by user - 2 - user site directory is disabled by super user - or for security reasons - >2 - unknown error - """ - args = sys.argv[1:] - if not args: - print "sys.path = [" - for dir in sys.path: - print " %r," % (dir,) - print "]" - print "USER_BASE: %r (%s)" % (USER_BASE, - "exists" if os.path.isdir(USER_BASE) else "doesn't exist") - print "USER_SITE: %r (%s)" % (USER_SITE, - "exists" if os.path.isdir(USER_SITE) else "doesn't exist") - print "ENABLE_USER_SITE: %r" % ENABLE_USER_SITE - sys.exit(0) - - buffer = [] - if '--user-base' in args: - buffer.append(USER_BASE) - if '--user-site' in args: - buffer.append(USER_SITE) - - if buffer: - print os.pathsep.join(buffer) - if ENABLE_USER_SITE: - sys.exit(0) - elif ENABLE_USER_SITE is False: - sys.exit(1) - elif ENABLE_USER_SITE is None: - sys.exit(2) - else: - sys.exit(3) - else: - import textwrap - print textwrap.dedent(help % (sys.argv[0], os.pathsep)) - sys.exit(10) - -if __name__ == '__main__': - _script() diff --git a/icarus-miner/data/usr/lib/python2.6/site.pyc b/icarus-miner/data/usr/lib/python2.6/site.pyc deleted file mode 100644 index 9ca8af1..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/site.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/socket.py b/icarus-miner/data/usr/lib/python2.6/socket.py deleted file mode 100644 index 596e30c..0000000 --- a/icarus-miner/data/usr/lib/python2.6/socket.py +++ /dev/null @@ -1,514 +0,0 @@ -# Wrapper module for _socket, providing some additional facilities -# implemented in Python. - -"""\ -This module provides socket operations and some related functions. -On Unix, it supports IP (Internet Protocol) and Unix domain sockets. -On other systems, it only supports IP. Functions specific for a -socket are available as methods of the socket object. 
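A short, hypothetical session with two of the helpers defined in this module (host and port are illustrative; Python 2 print syntax to match the file):

import socket

print socket.getfqdn()                        # fully qualified name of this host
s = socket.create_connection(('example.org', 80), timeout=5)
s.sendall('HEAD / HTTP/1.0\r\nHost: example.org\r\n\r\n')
print s.recv(64)                              # first bytes of the reply
s.close()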
- -Functions: - -socket() -- create a new socket object -socketpair() -- create a pair of new socket objects [*] -fromfd() -- create a socket object from an open file descriptor [*] -gethostname() -- return the current hostname -gethostbyname() -- map a hostname to its IP number -gethostbyaddr() -- map an IP number or hostname to DNS info -getservbyname() -- map a service name and a protocol name to a port number -getprotobyname() -- map a protocol name (e.g. 'tcp') to a number -ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order -htons(), htonl() -- convert 16, 32 bit int from host to network byte order -inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format -inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89) -ssl() -- secure socket layer support (only available if configured) -socket.getdefaulttimeout() -- get the default timeout value -socket.setdefaulttimeout() -- set the default timeout value -create_connection() -- connects to an address, with an optional timeout - - [*] not available on all platforms! - -Special objects: - -SocketType -- type object for socket objects -error -- exception raised for I/O errors -has_ipv6 -- boolean value indicating if IPv6 is supported - -Integer constants: - -AF_INET, AF_UNIX -- socket domains (first argument to socket() call) -SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument) - -Many other constants may be defined; these may be used in calls to -the setsockopt() and getsockopt() methods. -""" - -import _socket -from _socket import * - -try: - import _ssl -except ImportError: - # no SSL support - pass -else: - def ssl(sock, keyfile=None, certfile=None): - # we do an internal import here because the ssl - # module imports the socket module - import ssl as _realssl - warnings.warn("socket.ssl() is deprecated. Use ssl.wrap_socket() instead.", - DeprecationWarning, stacklevel=2) - return _realssl.sslwrap_simple(sock, keyfile, certfile) - - # we need to import the same constants we used to... - from _ssl import SSLError as sslerror - from _ssl import \ - RAND_add, \ - RAND_egd, \ - RAND_status, \ - SSL_ERROR_ZERO_RETURN, \ - SSL_ERROR_WANT_READ, \ - SSL_ERROR_WANT_WRITE, \ - SSL_ERROR_WANT_X509_LOOKUP, \ - SSL_ERROR_SYSCALL, \ - SSL_ERROR_SSL, \ - SSL_ERROR_WANT_CONNECT, \ - SSL_ERROR_EOF, \ - SSL_ERROR_INVALID_ERROR_CODE - -import os, sys, warnings - -try: - from cStringIO import StringIO -except ImportError: - from StringIO import StringIO - -try: - from errno import EBADF -except ImportError: - EBADF = 9 - -__all__ = ["getfqdn", "create_connection"] -__all__.extend(os._get_exports_list(_socket)) - - -_realsocket = socket - -# WSA error codes -if sys.platform.lower().startswith("win"): - errorTab = {} - errorTab[10004] = "The operation was interrupted." - errorTab[10009] = "A bad file handle was passed." - errorTab[10013] = "Permission denied." - errorTab[10014] = "A fault occurred on the network??" # WSAEFAULT - errorTab[10022] = "An invalid operation was attempted." - errorTab[10035] = "The socket operation would block" - errorTab[10036] = "A blocking operation is already in progress." - errorTab[10048] = "The network address is in use." - errorTab[10054] = "The connection has been reset." - errorTab[10058] = "The network has been shut down." - errorTab[10060] = "The operation timed out." - errorTab[10061] = "Connection refused." - errorTab[10063] = "The name is too long." - errorTab[10064] = "The host is down." - errorTab[10065] = "The host is unreachable." 
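The table above is a plain dict keyed by WSA error number and is only populated on win32 platforms, so a lookup is just (illustrative):

print errorTab.get(10061, 'unknown WSA error')   # -> Connection refused.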
- __all__.append("errorTab") - - - -def getfqdn(name=''): - """Get fully qualified domain name from name. - - An empty argument is interpreted as meaning the local host. - - First the hostname returned by gethostbyaddr() is checked, then - possibly existing aliases. In case no FQDN is available, hostname - from gethostname() is returned. - """ - name = name.strip() - if not name or name == '0.0.0.0': - name = gethostname() - try: - hostname, aliases, ipaddrs = gethostbyaddr(name) - except error: - pass - else: - aliases.insert(0, hostname) - for name in aliases: - if '.' in name: - break - else: - name = hostname - return name - - -_socketmethods = ( - 'bind', 'connect', 'connect_ex', 'fileno', 'listen', - 'getpeername', 'getsockname', 'getsockopt', 'setsockopt', - 'sendall', 'setblocking', - 'settimeout', 'gettimeout', 'shutdown') - -if os.name == "nt": - _socketmethods = _socketmethods + ('ioctl',) - -if sys.platform == "riscos": - _socketmethods = _socketmethods + ('sleeptaskw',) - -# All the method names that must be delegated to either the real socket -# object or the _closedsocket object. -_delegate_methods = ("recv", "recvfrom", "recv_into", "recvfrom_into", - "send", "sendto") - -class _closedsocket(object): - __slots__ = [] - def _dummy(*args): - raise error(EBADF, 'Bad file descriptor') - # All _delegate_methods must also be initialized here. - send = recv = recv_into = sendto = recvfrom = recvfrom_into = _dummy - __getattr__ = _dummy - -# Wrapper around platform socket objects. This implements -# a platform-independent dup() functionality. The -# implementation currently relies on reference counting -# to close the underlying socket object. -class _socketobject(object): - - __doc__ = _realsocket.__doc__ - - __slots__ = ["_sock", "__weakref__"] + list(_delegate_methods) - - def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None): - if _sock is None: - _sock = _realsocket(family, type, proto) - self._sock = _sock - for method in _delegate_methods: - setattr(self, method, getattr(_sock, method)) - - def close(self): - self._sock = _closedsocket() - dummy = self._sock._dummy - for method in _delegate_methods: - setattr(self, method, dummy) - close.__doc__ = _realsocket.close.__doc__ - - def accept(self): - sock, addr = self._sock.accept() - return _socketobject(_sock=sock), addr - accept.__doc__ = _realsocket.accept.__doc__ - - def dup(self): - """dup() -> socket object - - Return a new socket object connected to the same system resource.""" - return _socketobject(_sock=self._sock) - - def makefile(self, mode='r', bufsize=-1): - """makefile([mode[, bufsize]]) -> file object - - Return a regular file object corresponding to the socket. 
The mode - and bufsize arguments are as for the built-in open() function.""" - return _fileobject(self._sock, mode, bufsize) - - family = property(lambda self: self._sock.family, doc="the socket family") - type = property(lambda self: self._sock.type, doc="the socket type") - proto = property(lambda self: self._sock.proto, doc="the socket protocol") - - _s = ("def %s(self, *args): return self._sock.%s(*args)\n\n" - "%s.__doc__ = _realsocket.%s.__doc__\n") - for _m in _socketmethods: - exec _s % (_m, _m, _m, _m) - del _m, _s - -socket = SocketType = _socketobject - -class _fileobject(object): - """Faux file object attached to a socket object.""" - - default_bufsize = 8192 - name = "" - - __slots__ = ["mode", "bufsize", "softspace", - # "closed" is a property, see below - "_sock", "_rbufsize", "_wbufsize", "_rbuf", "_wbuf", "_wbuf_len", - "_close"] - - def __init__(self, sock, mode='rb', bufsize=-1, close=False): - self._sock = sock - self.mode = mode # Not actually used in this version - if bufsize < 0: - bufsize = self.default_bufsize - self.bufsize = bufsize - self.softspace = False - # _rbufsize is the suggested recv buffer size. It is *strictly* - # obeyed within readline() for recv calls. If it is larger than - # default_bufsize it will be used for recv calls within read(). - if bufsize == 0: - self._rbufsize = 1 - elif bufsize == 1: - self._rbufsize = self.default_bufsize - else: - self._rbufsize = bufsize - self._wbufsize = bufsize - # We use StringIO for the read buffer to avoid holding a list - # of variously sized string objects which have been known to - # fragment the heap due to how they are malloc()ed and often - # realloc()ed down much smaller than their original allocation. - self._rbuf = StringIO() - self._wbuf = [] # A list of strings - self._wbuf_len = 0 - self._close = close - - def _getclosed(self): - return self._sock is None - closed = property(_getclosed, doc="True if the file is closed") - - def close(self): - try: - if self._sock: - self.flush() - finally: - if self._close: - self._sock.close() - self._sock = None - - def __del__(self): - try: - self.close() - except: - # close() may fail if __init__ didn't complete - pass - - def flush(self): - if self._wbuf: - buffer = "".join(self._wbuf) - self._wbuf = [] - self._wbuf_len = 0 - self._sock.sendall(buffer) - - def fileno(self): - return self._sock.fileno() - - def write(self, data): - data = str(data) # XXX Should really reject non-string non-buffers - if not data: - return - self._wbuf.append(data) - self._wbuf_len += len(data) - if (self._wbufsize == 0 or - self._wbufsize == 1 and '\n' in data or - self._wbuf_len >= self._wbufsize): - self.flush() - - def writelines(self, list): - # XXX We could do better here for very long lists - # XXX Should really reject non-string non-buffers - lines = filter(None, map(str, list)) - self._wbuf_len += sum(map(len, lines)) - self._wbuf.extend(lines) - if (self._wbufsize <= 1 or - self._wbuf_len >= self._wbufsize): - self.flush() - - def _get_wbuf_len(self): - return self._wbuf_len - - def read(self, size=-1): - # Use max, disallow tiny reads in a loop as they are very inefficient. - # We never leave read() with any leftover data from a new recv() call - # in our internal buffer. - rbufsize = max(self._rbufsize, self.default_bufsize) - # Our use of StringIO rather than lists of string objects returned by - # recv() minimizes memory usage and fragmentation that occurs when - # rbufsize is large compared to the typical return value of recv(). 
- buf = self._rbuf - buf.seek(0, 2) # seek end - if size < 0: - # Read until EOF - self._rbuf = StringIO() # reset _rbuf. we consume it via buf. - while True: - data = self._sock.recv(rbufsize) - if not data: - break - buf.write(data) - return buf.getvalue() - else: - # Read until size bytes or EOF seen, whichever comes first - buf_len = buf.tell() - if buf_len >= size: - # Already have size bytes in our buffer? Extract and return. - buf.seek(0) - rv = buf.read(size) - self._rbuf = StringIO() - self._rbuf.write(buf.read()) - return rv - - self._rbuf = StringIO() # reset _rbuf. we consume it via buf. - while True: - left = size - buf_len - # recv() will malloc the amount of memory given as its - # parameter even though it often returns much less data - # than that. The returned data string is short lived - # as we copy it into a StringIO and free it. This avoids - # fragmentation issues on many platforms. - data = self._sock.recv(left) - if not data: - break - n = len(data) - if n == size and not buf_len: - # Shortcut. Avoid buffer data copies when: - # - We have no data in our buffer. - # AND - # - Our call to recv returned exactly the - # number of bytes we were asked to read. - return data - if n == left: - buf.write(data) - del data # explicit free - break - assert n <= left, "recv(%d) returned %d bytes" % (left, n) - buf.write(data) - buf_len += n - del data # explicit free - #assert buf_len == buf.tell() - return buf.getvalue() - - def readline(self, size=-1): - buf = self._rbuf - buf.seek(0, 2) # seek end - if buf.tell() > 0: - # check if we already have it in our buffer - buf.seek(0) - bline = buf.readline(size) - if bline.endswith('\n') or len(bline) == size: - self._rbuf = StringIO() - self._rbuf.write(buf.read()) - return bline - del bline - if size < 0: - # Read until \n or EOF, whichever comes first - if self._rbufsize <= 1: - # Speed up unbuffered case - buf.seek(0) - buffers = [buf.read()] - self._rbuf = StringIO() # reset _rbuf. we consume it via buf. - data = None - recv = self._sock.recv - while data != "\n": - data = recv(1) - if not data: - break - buffers.append(data) - return "".join(buffers) - - buf.seek(0, 2) # seek end - self._rbuf = StringIO() # reset _rbuf. we consume it via buf. - while True: - data = self._sock.recv(self._rbufsize) - if not data: - break - nl = data.find('\n') - if nl >= 0: - nl += 1 - buf.write(data[:nl]) - self._rbuf.write(data[nl:]) - del data - break - buf.write(data) - return buf.getvalue() - else: - # Read until size bytes or \n or EOF seen, whichever comes first - buf.seek(0, 2) # seek end - buf_len = buf.tell() - if buf_len >= size: - buf.seek(0) - rv = buf.read(size) - self._rbuf = StringIO() - self._rbuf.write(buf.read()) - return rv - self._rbuf = StringIO() # reset _rbuf. we consume it via buf. - while True: - data = self._sock.recv(self._rbufsize) - if not data: - break - left = size - buf_len - # did we just receive a newline? - nl = data.find('\n', 0, left) - if nl >= 0: - nl += 1 - # save the excess data to _rbuf - self._rbuf.write(data[nl:]) - if buf_len: - buf.write(data[:nl]) - break - else: - # Shortcut. Avoid data copy through buf when returning - # a substring of our first recv(). - return data[:nl] - n = len(data) - if n == size and not buf_len: - # Shortcut. Avoid data copy through buf when - # returning exactly all of our first recv(). 
- return data - if n >= left: - buf.write(data[:left]) - self._rbuf.write(data[left:]) - break - buf.write(data) - buf_len += n - #assert buf_len == buf.tell() - return buf.getvalue() - - def readlines(self, sizehint=0): - total = 0 - list = [] - while True: - line = self.readline() - if not line: - break - list.append(line) - total += len(line) - if sizehint and total >= sizehint: - break - return list - - # Iterator protocols - - def __iter__(self): - return self - - def next(self): - line = self.readline() - if not line: - raise StopIteration - return line - -_GLOBAL_DEFAULT_TIMEOUT = object() - -def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT): - """Connect to *address* and return the socket object. - - Convenience function. Connect to *address* (a 2-tuple ``(host, - port)``) and return the socket object. Passing the optional - *timeout* parameter will set the timeout on the socket instance - before attempting to connect. If no *timeout* is supplied, the - global default timeout setting returned by :func:`getdefaulttimeout` - is used. - """ - - msg = "getaddrinfo returns an empty list" - host, port = address - for res in getaddrinfo(host, port, 0, SOCK_STREAM): - af, socktype, proto, canonname, sa = res - sock = None - try: - sock = socket(af, socktype, proto) - if timeout is not _GLOBAL_DEFAULT_TIMEOUT: - sock.settimeout(timeout) - sock.connect(sa) - return sock - - except error, msg: - if sock is not None: - sock.close() - - raise error, msg diff --git a/icarus-miner/data/usr/lib/python2.6/socket.pyc b/icarus-miner/data/usr/lib/python2.6/socket.pyc deleted file mode 100644 index 76e23ef..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/socket.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/sre_compile.py b/icarus-miner/data/usr/lib/python2.6/sre_compile.py deleted file mode 100644 index 65d2ccf..0000000 --- a/icarus-miner/data/usr/lib/python2.6/sre_compile.py +++ /dev/null @@ -1,530 +0,0 @@ -# -# Secret Labs' Regular Expression Engine -# -# convert template to internal format -# -# Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved. -# -# See the sre.py file for information on usage and redistribution. 
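sre_compile below is the internal backend of the re module; user code normally reaches it indirectly through re.compile (a minimal sketch):

import re

pat = re.compile('ab*c', re.IGNORECASE)   # parsed by sre_parse, encoded by sre_compile
print bool(pat.match('ABBC'))             # -> True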
-# - -"""Internal support module for sre""" - -import _sre, sys -import sre_parse -from sre_constants import * - -assert _sre.MAGIC == MAGIC, "SRE module mismatch" - -if _sre.CODESIZE == 2: - MAXCODE = 65535 -else: - MAXCODE = 0xFFFFFFFFL - -def _identityfunction(x): - return x - -def set(seq): - s = {} - for elem in seq: - s[elem] = 1 - return s - -_LITERAL_CODES = set([LITERAL, NOT_LITERAL]) -_REPEATING_CODES = set([REPEAT, MIN_REPEAT, MAX_REPEAT]) -_SUCCESS_CODES = set([SUCCESS, FAILURE]) -_ASSERT_CODES = set([ASSERT, ASSERT_NOT]) - -def _compile(code, pattern, flags): - # internal: compile a (sub)pattern - emit = code.append - _len = len - LITERAL_CODES = _LITERAL_CODES - REPEATING_CODES = _REPEATING_CODES - SUCCESS_CODES = _SUCCESS_CODES - ASSERT_CODES = _ASSERT_CODES - for op, av in pattern: - if op in LITERAL_CODES: - if flags & SRE_FLAG_IGNORECASE: - emit(OPCODES[OP_IGNORE[op]]) - emit(_sre.getlower(av, flags)) - else: - emit(OPCODES[op]) - emit(av) - elif op is IN: - if flags & SRE_FLAG_IGNORECASE: - emit(OPCODES[OP_IGNORE[op]]) - def fixup(literal, flags=flags): - return _sre.getlower(literal, flags) - else: - emit(OPCODES[op]) - fixup = _identityfunction - skip = _len(code); emit(0) - _compile_charset(av, flags, code, fixup) - code[skip] = _len(code) - skip - elif op is ANY: - if flags & SRE_FLAG_DOTALL: - emit(OPCODES[ANY_ALL]) - else: - emit(OPCODES[ANY]) - elif op in REPEATING_CODES: - if flags & SRE_FLAG_TEMPLATE: - raise error, "internal: unsupported template operator" - emit(OPCODES[REPEAT]) - skip = _len(code); emit(0) - emit(av[0]) - emit(av[1]) - _compile(code, av[2], flags) - emit(OPCODES[SUCCESS]) - code[skip] = _len(code) - skip - elif _simple(av) and op is not REPEAT: - if op is MAX_REPEAT: - emit(OPCODES[REPEAT_ONE]) - else: - emit(OPCODES[MIN_REPEAT_ONE]) - skip = _len(code); emit(0) - emit(av[0]) - emit(av[1]) - _compile(code, av[2], flags) - emit(OPCODES[SUCCESS]) - code[skip] = _len(code) - skip - else: - emit(OPCODES[REPEAT]) - skip = _len(code); emit(0) - emit(av[0]) - emit(av[1]) - _compile(code, av[2], flags) - code[skip] = _len(code) - skip - if op is MAX_REPEAT: - emit(OPCODES[MAX_UNTIL]) - else: - emit(OPCODES[MIN_UNTIL]) - elif op is SUBPATTERN: - if av[0]: - emit(OPCODES[MARK]) - emit((av[0]-1)*2) - # _compile_info(code, av[1], flags) - _compile(code, av[1], flags) - if av[0]: - emit(OPCODES[MARK]) - emit((av[0]-1)*2+1) - elif op in SUCCESS_CODES: - emit(OPCODES[op]) - elif op in ASSERT_CODES: - emit(OPCODES[op]) - skip = _len(code); emit(0) - if av[0] >= 0: - emit(0) # look ahead - else: - lo, hi = av[1].getwidth() - if lo != hi: - raise error, "look-behind requires fixed-width pattern" - emit(lo) # look behind - _compile(code, av[1], flags) - emit(OPCODES[SUCCESS]) - code[skip] = _len(code) - skip - elif op is CALL: - emit(OPCODES[op]) - skip = _len(code); emit(0) - _compile(code, av, flags) - emit(OPCODES[SUCCESS]) - code[skip] = _len(code) - skip - elif op is AT: - emit(OPCODES[op]) - if flags & SRE_FLAG_MULTILINE: - av = AT_MULTILINE.get(av, av) - if flags & SRE_FLAG_LOCALE: - av = AT_LOCALE.get(av, av) - elif flags & SRE_FLAG_UNICODE: - av = AT_UNICODE.get(av, av) - emit(ATCODES[av]) - elif op is BRANCH: - emit(OPCODES[op]) - tail = [] - tailappend = tail.append - for av in av[1]: - skip = _len(code); emit(0) - # _compile_info(code, av, flags) - _compile(code, av, flags) - emit(OPCODES[JUMP]) - tailappend(_len(code)); emit(0) - code[skip] = _len(code) - skip - emit(0) # end of branch - for tail in tail: - code[tail] = _len(code) - tail - elif op 
is CATEGORY: - emit(OPCODES[op]) - if flags & SRE_FLAG_LOCALE: - av = CH_LOCALE[av] - elif flags & SRE_FLAG_UNICODE: - av = CH_UNICODE[av] - emit(CHCODES[av]) - elif op is GROUPREF: - if flags & SRE_FLAG_IGNORECASE: - emit(OPCODES[OP_IGNORE[op]]) - else: - emit(OPCODES[op]) - emit(av-1) - elif op is GROUPREF_EXISTS: - emit(OPCODES[op]) - emit(av[0]-1) - skipyes = _len(code); emit(0) - _compile(code, av[1], flags) - if av[2]: - emit(OPCODES[JUMP]) - skipno = _len(code); emit(0) - code[skipyes] = _len(code) - skipyes + 1 - _compile(code, av[2], flags) - code[skipno] = _len(code) - skipno - else: - code[skipyes] = _len(code) - skipyes + 1 - else: - raise ValueError, ("unsupported operand type", op) - -def _compile_charset(charset, flags, code, fixup=None): - # compile charset subprogram - emit = code.append - if fixup is None: - fixup = _identityfunction - for op, av in _optimize_charset(charset, fixup): - emit(OPCODES[op]) - if op is NEGATE: - pass - elif op is LITERAL: - emit(fixup(av)) - elif op is RANGE: - emit(fixup(av[0])) - emit(fixup(av[1])) - elif op is CHARSET: - code.extend(av) - elif op is BIGCHARSET: - code.extend(av) - elif op is CATEGORY: - if flags & SRE_FLAG_LOCALE: - emit(CHCODES[CH_LOCALE[av]]) - elif flags & SRE_FLAG_UNICODE: - emit(CHCODES[CH_UNICODE[av]]) - else: - emit(CHCODES[av]) - else: - raise error, "internal: unsupported set operator" - emit(OPCODES[FAILURE]) - -def _optimize_charset(charset, fixup): - # internal: optimize character set - out = [] - outappend = out.append - charmap = [0]*256 - try: - for op, av in charset: - if op is NEGATE: - outappend((op, av)) - elif op is LITERAL: - charmap[fixup(av)] = 1 - elif op is RANGE: - for i in range(fixup(av[0]), fixup(av[1])+1): - charmap[i] = 1 - elif op is CATEGORY: - # XXX: could append to charmap tail - return charset # cannot compress - except IndexError: - # character set contains unicode characters - return _optimize_unicode(charset, fixup) - # compress character map - i = p = n = 0 - runs = [] - runsappend = runs.append - for c in charmap: - if c: - if n == 0: - p = i - n = n + 1 - elif n: - runsappend((p, n)) - n = 0 - i = i + 1 - if n: - runsappend((p, n)) - if len(runs) <= 2: - # use literal/range - for p, n in runs: - if n == 1: - outappend((LITERAL, p)) - else: - outappend((RANGE, (p, p+n-1))) - if len(out) < len(charset): - return out - else: - # use bitmap - data = _mk_bitmap(charmap) - outappend((CHARSET, data)) - return out - return charset - -def _mk_bitmap(bits): - data = [] - dataappend = data.append - if _sre.CODESIZE == 2: - start = (1, 0) - else: - start = (1L, 0L) - m, v = start - for c in bits: - if c: - v = v + m - m = m + m - if m > MAXCODE: - dataappend(v) - m, v = start - return data - -# To represent a big charset, first a bitmap of all characters in the -# set is constructed. Then, this bitmap is sliced into chunks of 256 -# characters, duplicate chunks are eliminated, and each chunk is -# given a number. In the compiled expression, the charset is -# represented by a 16-bit word sequence, consisting of one word for -# the number of different chunks, a sequence of 256 bytes (128 words) -# of chunk numbers indexed by their original chunk position, and a -# sequence of chunks (16 words each). - -# Compression is normally good: in a typical charset, large ranges of -# Unicode will be either completely excluded (e.g. if only cyrillic -# letters are to be matched), or completely included (e.g. if large -# subranges of Kanji match). 
These ranges will be represented by -# chunks of all one-bits or all zero-bits. - -# Matching can be also done efficiently: the more significant byte of -# the Unicode character is an index into the chunk number, and the -# less significant byte is a bit index in the chunk (just like the -# CHARSET matching). - -# In UCS-4 mode, the BIGCHARSET opcode still supports only subsets -# of the basic multilingual plane; an efficient representation -# for all of UTF-16 has not yet been developed. This means, -# in particular, that negated charsets cannot be represented as -# bigcharsets. - -def _optimize_unicode(charset, fixup): - try: - import array - except ImportError: - return charset - charmap = [0]*65536 - negate = 0 - try: - for op, av in charset: - if op is NEGATE: - negate = 1 - elif op is LITERAL: - charmap[fixup(av)] = 1 - elif op is RANGE: - for i in xrange(fixup(av[0]), fixup(av[1])+1): - charmap[i] = 1 - elif op is CATEGORY: - # XXX: could expand category - return charset # cannot compress - except IndexError: - # non-BMP characters - return charset - if negate: - if sys.maxunicode != 65535: - # XXX: negation does not work with big charsets - return charset - for i in xrange(65536): - charmap[i] = not charmap[i] - comps = {} - mapping = [0]*256 - block = 0 - data = [] - for i in xrange(256): - chunk = tuple(charmap[i*256:(i+1)*256]) - new = comps.setdefault(chunk, block) - mapping[i] = new - if new == block: - block = block + 1 - data = data + _mk_bitmap(chunk) - header = [block] - if _sre.CODESIZE == 2: - code = 'H' - else: - code = 'I' - # Convert block indices to byte array of 256 bytes - mapping = array.array('b', mapping).tostring() - # Convert byte array to word array - mapping = array.array(code, mapping) - assert mapping.itemsize == _sre.CODESIZE - header = header + mapping.tolist() - data[0:0] = header - return [(BIGCHARSET, data)] - -def _simple(av): - # check if av is a "simple" operator - lo, hi = av[2].getwidth() - if lo == 0 and hi == MAXREPEAT: - raise error, "nothing to repeat" - return lo == hi == 1 and av[2][0][0] != SUBPATTERN - -def _compile_info(code, pattern, flags): - # internal: compile an info block. 
in the current version, - # this contains min/max pattern width, and an optional literal - # prefix or a character map - lo, hi = pattern.getwidth() - if lo == 0: - return # not worth it - # look for a literal prefix - prefix = [] - prefixappend = prefix.append - prefix_skip = 0 - charset = [] # not used - charsetappend = charset.append - if not (flags & SRE_FLAG_IGNORECASE): - # look for literal prefix - for op, av in pattern.data: - if op is LITERAL: - if len(prefix) == prefix_skip: - prefix_skip = prefix_skip + 1 - prefixappend(av) - elif op is SUBPATTERN and len(av[1]) == 1: - op, av = av[1][0] - if op is LITERAL: - prefixappend(av) - else: - break - else: - break - # if no prefix, look for charset prefix - if not prefix and pattern.data: - op, av = pattern.data[0] - if op is SUBPATTERN and av[1]: - op, av = av[1][0] - if op is LITERAL: - charsetappend((op, av)) - elif op is BRANCH: - c = [] - cappend = c.append - for p in av[1]: - if not p: - break - op, av = p[0] - if op is LITERAL: - cappend((op, av)) - else: - break - else: - charset = c - elif op is BRANCH: - c = [] - cappend = c.append - for p in av[1]: - if not p: - break - op, av = p[0] - if op is LITERAL: - cappend((op, av)) - else: - break - else: - charset = c - elif op is IN: - charset = av -## if prefix: -## print "*** PREFIX", prefix, prefix_skip -## if charset: -## print "*** CHARSET", charset - # add an info block - emit = code.append - emit(OPCODES[INFO]) - skip = len(code); emit(0) - # literal flag - mask = 0 - if prefix: - mask = SRE_INFO_PREFIX - if len(prefix) == prefix_skip == len(pattern.data): - mask = mask + SRE_INFO_LITERAL - elif charset: - mask = mask + SRE_INFO_CHARSET - emit(mask) - # pattern length - if lo < MAXCODE: - emit(lo) - else: - emit(MAXCODE) - prefix = prefix[:MAXCODE] - if hi < MAXCODE: - emit(hi) - else: - emit(0) - # add literal prefix - if prefix: - emit(len(prefix)) # length - emit(prefix_skip) # skip - code.extend(prefix) - # generate overlap table - table = [-1] + ([0]*len(prefix)) - for i in xrange(len(prefix)): - table[i+1] = table[i]+1 - while table[i+1] > 0 and prefix[i] != prefix[table[i+1]-1]: - table[i+1] = table[table[i+1]-1]+1 - code.extend(table[1:]) # don't store first entry - elif charset: - _compile_charset(charset, flags, code) - code[skip] = len(code) - skip - -try: - unicode -except NameError: - STRING_TYPES = (type(""),) -else: - STRING_TYPES = (type(""), type(unicode(""))) - -def isstring(obj): - for tp in STRING_TYPES: - if isinstance(obj, tp): - return 1 - return 0 - -def _code(p, flags): - - flags = p.pattern.flags | flags - code = [] - - # compile info block - _compile_info(code, p, flags) - - # compile the pattern - _compile(code, p.data, flags) - - code.append(OPCODES[SUCCESS]) - - return code - -def compile(p, flags=0): - # internal: convert pattern list to internal format - - if isstring(p): - pattern = p - p = sre_parse.parse(p, flags) - else: - pattern = None - - code = _code(p, flags) - - # print code - - # XXX: get rid of this limitation! 
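# (Illustration, hypothetical: sre_compile.compile('(a)' * 101) trips the
# check below and raises AssertionError with the message given here.)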
- if p.pattern.groups > 100: - raise AssertionError( - "sorry, but this version only supports 100 named groups" - ) - - # map in either direction - groupindex = p.pattern.groupdict - indexgroup = [None] * p.pattern.groups - for k, i in groupindex.items(): - indexgroup[i] = k - - return _sre.compile( - pattern, flags | p.pattern.flags, code, - p.pattern.groups-1, - groupindex, indexgroup - ) diff --git a/icarus-miner/data/usr/lib/python2.6/sre_compile.pyc b/icarus-miner/data/usr/lib/python2.6/sre_compile.pyc deleted file mode 100644 index be17df5..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/sre_compile.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/sre_constants.py b/icarus-miner/data/usr/lib/python2.6/sre_constants.py deleted file mode 100644 index 1863f48..0000000 --- a/icarus-miner/data/usr/lib/python2.6/sre_constants.py +++ /dev/null @@ -1,261 +0,0 @@ -# -# Secret Labs' Regular Expression Engine -# -# various symbols used by the regular expression engine. -# run this script to update the _sre include files! -# -# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. -# -# See the sre.py file for information on usage and redistribution. -# - -"""Internal support module for sre""" - -# update when constants are added or removed - -MAGIC = 20031017 - -# max code word in this release - -MAXREPEAT = 65535 - -# SRE standard exception (access as sre.error) -# should this really be here? - -class error(Exception): - pass - -# operators - -FAILURE = "failure" -SUCCESS = "success" - -ANY = "any" -ANY_ALL = "any_all" -ASSERT = "assert" -ASSERT_NOT = "assert_not" -AT = "at" -BIGCHARSET = "bigcharset" -BRANCH = "branch" -CALL = "call" -CATEGORY = "category" -CHARSET = "charset" -GROUPREF = "groupref" -GROUPREF_IGNORE = "groupref_ignore" -GROUPREF_EXISTS = "groupref_exists" -IN = "in" -IN_IGNORE = "in_ignore" -INFO = "info" -JUMP = "jump" -LITERAL = "literal" -LITERAL_IGNORE = "literal_ignore" -MARK = "mark" -MAX_REPEAT = "max_repeat" -MAX_UNTIL = "max_until" -MIN_REPEAT = "min_repeat" -MIN_UNTIL = "min_until" -NEGATE = "negate" -NOT_LITERAL = "not_literal" -NOT_LITERAL_IGNORE = "not_literal_ignore" -RANGE = "range" -REPEAT = "repeat" -REPEAT_ONE = "repeat_one" -SUBPATTERN = "subpattern" -MIN_REPEAT_ONE = "min_repeat_one" - -# positions -AT_BEGINNING = "at_beginning" -AT_BEGINNING_LINE = "at_beginning_line" -AT_BEGINNING_STRING = "at_beginning_string" -AT_BOUNDARY = "at_boundary" -AT_NON_BOUNDARY = "at_non_boundary" -AT_END = "at_end" -AT_END_LINE = "at_end_line" -AT_END_STRING = "at_end_string" -AT_LOC_BOUNDARY = "at_loc_boundary" -AT_LOC_NON_BOUNDARY = "at_loc_non_boundary" -AT_UNI_BOUNDARY = "at_uni_boundary" -AT_UNI_NON_BOUNDARY = "at_uni_non_boundary" - -# categories -CATEGORY_DIGIT = "category_digit" -CATEGORY_NOT_DIGIT = "category_not_digit" -CATEGORY_SPACE = "category_space" -CATEGORY_NOT_SPACE = "category_not_space" -CATEGORY_WORD = "category_word" -CATEGORY_NOT_WORD = "category_not_word" -CATEGORY_LINEBREAK = "category_linebreak" -CATEGORY_NOT_LINEBREAK = "category_not_linebreak" -CATEGORY_LOC_WORD = "category_loc_word" -CATEGORY_LOC_NOT_WORD = "category_loc_not_word" -CATEGORY_UNI_DIGIT = "category_uni_digit" -CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit" -CATEGORY_UNI_SPACE = "category_uni_space" -CATEGORY_UNI_NOT_SPACE = "category_uni_not_space" -CATEGORY_UNI_WORD = "category_uni_word" -CATEGORY_UNI_NOT_WORD = "category_uni_not_word" -CATEGORY_UNI_LINEBREAK = "category_uni_linebreak" -CATEGORY_UNI_NOT_LINEBREAK = 
"category_uni_not_linebreak" - -OPCODES = [ - - # failure=0 success=1 (just because it looks better that way :-) - FAILURE, SUCCESS, - - ANY, ANY_ALL, - ASSERT, ASSERT_NOT, - AT, - BRANCH, - CALL, - CATEGORY, - CHARSET, BIGCHARSET, - GROUPREF, GROUPREF_EXISTS, GROUPREF_IGNORE, - IN, IN_IGNORE, - INFO, - JUMP, - LITERAL, LITERAL_IGNORE, - MARK, - MAX_UNTIL, - MIN_UNTIL, - NOT_LITERAL, NOT_LITERAL_IGNORE, - NEGATE, - RANGE, - REPEAT, - REPEAT_ONE, - SUBPATTERN, - MIN_REPEAT_ONE - -] - -ATCODES = [ - AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY, - AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING, - AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY, - AT_UNI_NON_BOUNDARY -] - -CHCODES = [ - CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE, - CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD, - CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD, - CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT, - CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD, - CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK, - CATEGORY_UNI_NOT_LINEBREAK -] - -def makedict(list): - d = {} - i = 0 - for item in list: - d[item] = i - i = i + 1 - return d - -OPCODES = makedict(OPCODES) -ATCODES = makedict(ATCODES) -CHCODES = makedict(CHCODES) - -# replacement operations for "ignore case" mode -OP_IGNORE = { - GROUPREF: GROUPREF_IGNORE, - IN: IN_IGNORE, - LITERAL: LITERAL_IGNORE, - NOT_LITERAL: NOT_LITERAL_IGNORE -} - -AT_MULTILINE = { - AT_BEGINNING: AT_BEGINNING_LINE, - AT_END: AT_END_LINE -} - -AT_LOCALE = { - AT_BOUNDARY: AT_LOC_BOUNDARY, - AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY -} - -AT_UNICODE = { - AT_BOUNDARY: AT_UNI_BOUNDARY, - AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY -} - -CH_LOCALE = { - CATEGORY_DIGIT: CATEGORY_DIGIT, - CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT, - CATEGORY_SPACE: CATEGORY_SPACE, - CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE, - CATEGORY_WORD: CATEGORY_LOC_WORD, - CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD, - CATEGORY_LINEBREAK: CATEGORY_LINEBREAK, - CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK -} - -CH_UNICODE = { - CATEGORY_DIGIT: CATEGORY_UNI_DIGIT, - CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT, - CATEGORY_SPACE: CATEGORY_UNI_SPACE, - CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE, - CATEGORY_WORD: CATEGORY_UNI_WORD, - CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD, - CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK, - CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK -} - -# flags -SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking) -SRE_FLAG_IGNORECASE = 2 # case insensitive -SRE_FLAG_LOCALE = 4 # honour system locale -SRE_FLAG_MULTILINE = 8 # treat target as multiline string -SRE_FLAG_DOTALL = 16 # treat target as a single string -SRE_FLAG_UNICODE = 32 # use unicode locale -SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments -SRE_FLAG_DEBUG = 128 # debugging - -# flags for INFO primitive -SRE_INFO_PREFIX = 1 # has prefix -SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix) -SRE_INFO_CHARSET = 4 # pattern starts with character from given set - -if __name__ == "__main__": - def dump(f, d, prefix): - items = d.items() - items.sort(key=lambda a: a[1]) - for k, v in items: - f.write("#define %s_%s %s\n" % (prefix, k.upper(), v)) - f = open("sre_constants.h", "w") - f.write("""\ -/* - * Secret Labs' Regular Expression Engine - * - * regular expression matching engine - * - * NOTE: This file is generated by sre_constants.py. If you need - * to change anything in here, edit sre_constants.py and run it. 
- * - * Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved. - * - * See the _sre.c file for information on usage and redistribution. - */ - -""") - - f.write("#define SRE_MAGIC %d\n" % MAGIC) - - dump(f, OPCODES, "SRE_OP") - dump(f, ATCODES, "SRE") - dump(f, CHCODES, "SRE") - - f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE) - f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE) - f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE) - f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE) - f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL) - f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE) - f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE) - - f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX) - f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL) - f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET) - - f.close() - print "done" diff --git a/icarus-miner/data/usr/lib/python2.6/sre_constants.pyc b/icarus-miner/data/usr/lib/python2.6/sre_constants.pyc deleted file mode 100644 index 1c9a85a..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/sre_constants.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/sre_parse.py b/icarus-miner/data/usr/lib/python2.6/sre_parse.py deleted file mode 100644 index 4781019..0000000 --- a/icarus-miner/data/usr/lib/python2.6/sre_parse.py +++ /dev/null @@ -1,796 +0,0 @@ -# -# Secret Labs' Regular Expression Engine -# -# convert re-style regular expression to sre pattern -# -# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved. -# -# See the sre.py file for information on usage and redistribution. -# - -"""Internal support module for sre""" - -# XXX: show string offset and offending character for all errors - -import sys - -from sre_constants import * - -def set(seq): - s = {} - for elem in seq: - s[elem] = 1 - return s - -SPECIAL_CHARS = ".\\[{()*+?^$|" -REPEAT_CHARS = "*+?{" - -DIGITS = set("0123456789") - -OCTDIGITS = set("01234567") -HEXDIGITS = set("0123456789abcdefABCDEF") - -WHITESPACE = set(" \t\n\r\v\f") - -ESCAPES = { - r"\a": (LITERAL, ord("\a")), - r"\b": (LITERAL, ord("\b")), - r"\f": (LITERAL, ord("\f")), - r"\n": (LITERAL, ord("\n")), - r"\r": (LITERAL, ord("\r")), - r"\t": (LITERAL, ord("\t")), - r"\v": (LITERAL, ord("\v")), - r"\\": (LITERAL, ord("\\")) -} - -CATEGORIES = { - r"\A": (AT, AT_BEGINNING_STRING), # start of string - r"\b": (AT, AT_BOUNDARY), - r"\B": (AT, AT_NON_BOUNDARY), - r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]), - r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]), - r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]), - r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]), - r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]), - r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]), - r"\Z": (AT, AT_END_STRING), # end of string -} - -FLAGS = { - # standard flags - "i": SRE_FLAG_IGNORECASE, - "L": SRE_FLAG_LOCALE, - "m": SRE_FLAG_MULTILINE, - "s": SRE_FLAG_DOTALL, - "x": SRE_FLAG_VERBOSE, - # extensions - "t": SRE_FLAG_TEMPLATE, - "u": SRE_FLAG_UNICODE, -} - -class Pattern: - # master pattern object. 
keeps track of global attributes - def __init__(self): - self.flags = 0 - self.open = [] - self.groups = 1 - self.groupdict = {} - def opengroup(self, name=None): - gid = self.groups - self.groups = gid + 1 - if name is not None: - ogid = self.groupdict.get(name, None) - if ogid is not None: - raise error, ("redefinition of group name %s as group %d; " - "was group %d" % (repr(name), gid, ogid)) - self.groupdict[name] = gid - self.open.append(gid) - return gid - def closegroup(self, gid): - self.open.remove(gid) - def checkgroup(self, gid): - return gid < self.groups and gid not in self.open - -class SubPattern: - # a subpattern, in intermediate form - def __init__(self, pattern, data=None): - self.pattern = pattern - if data is None: - data = [] - self.data = data - self.width = None - def dump(self, level=0): - nl = 1 - seqtypes = type(()), type([]) - for op, av in self.data: - print level*" " + op,; nl = 0 - if op == "in": - # member sublanguage - print; nl = 1 - for op, a in av: - print (level+1)*" " + op, a - elif op == "branch": - print; nl = 1 - i = 0 - for a in av[1]: - if i > 0: - print level*" " + "or" - a.dump(level+1); nl = 1 - i = i + 1 - elif type(av) in seqtypes: - for a in av: - if isinstance(a, SubPattern): - if not nl: print - a.dump(level+1); nl = 1 - else: - print a, ; nl = 0 - else: - print av, ; nl = 0 - if not nl: print - def __repr__(self): - return repr(self.data) - def __len__(self): - return len(self.data) - def __delitem__(self, index): - del self.data[index] - def __getitem__(self, index): - if isinstance(index, slice): - return SubPattern(self.pattern, self.data[index]) - return self.data[index] - def __setitem__(self, index, code): - self.data[index] = code - def insert(self, index, code): - self.data.insert(index, code) - def append(self, code): - self.data.append(code) - def getwidth(self): - # determine the width (min, max) for this subpattern - if self.width: - return self.width - lo = hi = 0L - UNITCODES = (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY) - REPEATCODES = (MIN_REPEAT, MAX_REPEAT) - for op, av in self.data: - if op is BRANCH: - i = sys.maxint - j = 0 - for av in av[1]: - l, h = av.getwidth() - i = min(i, l) - j = max(j, h) - lo = lo + i - hi = hi + j - elif op is CALL: - i, j = av.getwidth() - lo = lo + i - hi = hi + j - elif op is SUBPATTERN: - i, j = av[1].getwidth() - lo = lo + i - hi = hi + j - elif op in REPEATCODES: - i, j = av[2].getwidth() - lo = lo + long(i) * av[0] - hi = hi + long(j) * av[1] - elif op in UNITCODES: - lo = lo + 1 - hi = hi + 1 - elif op == SUCCESS: - break - self.width = int(min(lo, sys.maxint)), int(min(hi, sys.maxint)) - return self.width - -class Tokenizer: - def __init__(self, string): - self.string = string - self.index = 0 - self.__next() - def __next(self): - if self.index >= len(self.string): - self.next = None - return - char = self.string[self.index] - if char[0] == "\\": - try: - c = self.string[self.index + 1] - except IndexError: - raise error, "bogus escape (end of line)" - char = char + c - self.index = self.index + len(char) - self.next = char - def match(self, char, skip=1): - if char == self.next: - if skip: - self.__next() - return 1 - return 0 - def get(self): - this = self.next - self.__next() - return this - def tell(self): - return self.index, self.next - def seek(self, index): - self.index, self.next = index - -def isident(char): - return "a" <= char <= "z" or "A" <= char <= "Z" or char == "_" - -def isdigit(char): - return "0" <= char <= "9" - -def isname(name): - # check that group name 
is a valid string - if not isident(name[0]): - return False - for char in name[1:]: - if not isident(char) and not isdigit(char): - return False - return True - -def _class_escape(source, escape): - # handle escape code inside character class - code = ESCAPES.get(escape) - if code: - return code - code = CATEGORIES.get(escape) - if code: - return code - try: - c = escape[1:2] - if c == "x": - # hexadecimal escape (exactly two digits) - while source.next in HEXDIGITS and len(escape) < 4: - escape = escape + source.get() - escape = escape[2:] - if len(escape) != 2: - raise error, "bogus escape: %s" % repr("\\" + escape) - return LITERAL, int(escape, 16) & 0xff - elif c in OCTDIGITS: - # octal escape (up to three digits) - while source.next in OCTDIGITS and len(escape) < 4: - escape = escape + source.get() - escape = escape[1:] - return LITERAL, int(escape, 8) & 0xff - elif c in DIGITS: - raise error, "bogus escape: %s" % repr(escape) - if len(escape) == 2: - return LITERAL, ord(escape[1]) - except ValueError: - pass - raise error, "bogus escape: %s" % repr(escape) - -def _escape(source, escape, state): - # handle escape code in expression - code = CATEGORIES.get(escape) - if code: - return code - code = ESCAPES.get(escape) - if code: - return code - try: - c = escape[1:2] - if c == "x": - # hexadecimal escape - while source.next in HEXDIGITS and len(escape) < 4: - escape = escape + source.get() - if len(escape) != 4: - raise ValueError - return LITERAL, int(escape[2:], 16) & 0xff - elif c == "0": - # octal escape - while source.next in OCTDIGITS and len(escape) < 4: - escape = escape + source.get() - return LITERAL, int(escape[1:], 8) & 0xff - elif c in DIGITS: - # octal escape *or* decimal group reference (sigh) - if source.next in DIGITS: - escape = escape + source.get() - if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and - source.next in OCTDIGITS): - # got three octal digits; this is an octal escape - escape = escape + source.get() - return LITERAL, int(escape[1:], 8) & 0xff - # not an octal escape, so this is a group reference - group = int(escape[1:]) - if group < state.groups: - if not state.checkgroup(group): - raise error, "cannot refer to open group" - return GROUPREF, group - raise ValueError - if len(escape) == 2: - return LITERAL, ord(escape[1]) - except ValueError: - pass - raise error, "bogus escape: %s" % repr(escape) - -def _parse_sub(source, state, nested=1): - # parse an alternation: a|b|c - - items = [] - itemsappend = items.append - sourcematch = source.match - while 1: - itemsappend(_parse(source, state)) - if sourcematch("|"): - continue - if not nested: - break - if not source.next or sourcematch(")", 0): - break - else: - raise error, "pattern not properly closed" - - if len(items) == 1: - return items[0] - - subpattern = SubPattern(state) - subpatternappend = subpattern.append - - # check if all items share a common prefix - while 1: - prefix = None - for item in items: - if not item: - break - if prefix is None: - prefix = item[0] - elif item[0] != prefix: - break - else: - # all subitems start with a common "prefix". 
- # move it out of the branch - for item in items: - del item[0] - subpatternappend(prefix) - continue # check next one - break - - # check if the branch can be replaced by a character set - for item in items: - if len(item) != 1 or item[0][0] != LITERAL: - break - else: - # we can store this as a character set instead of a - # branch (the compiler may optimize this even more) - set = [] - setappend = set.append - for item in items: - setappend(item[0]) - subpatternappend((IN, set)) - return subpattern - - subpattern.append((BRANCH, (None, items))) - return subpattern - -def _parse_sub_cond(source, state, condgroup): - item_yes = _parse(source, state) - if source.match("|"): - item_no = _parse(source, state) - if source.match("|"): - raise error, "conditional backref with more than two branches" - else: - item_no = None - if source.next and not source.match(")", 0): - raise error, "pattern not properly closed" - subpattern = SubPattern(state) - subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no))) - return subpattern - -_PATTERNENDERS = set("|)") -_ASSERTCHARS = set("=!<") -_LOOKBEHINDASSERTCHARS = set("=!") -_REPEATCODES = set([MIN_REPEAT, MAX_REPEAT]) - -def _parse(source, state): - # parse a simple pattern - subpattern = SubPattern(state) - - # precompute constants into local variables - subpatternappend = subpattern.append - sourceget = source.get - sourcematch = source.match - _len = len - PATTERNENDERS = _PATTERNENDERS - ASSERTCHARS = _ASSERTCHARS - LOOKBEHINDASSERTCHARS = _LOOKBEHINDASSERTCHARS - REPEATCODES = _REPEATCODES - - while 1: - - if source.next in PATTERNENDERS: - break # end of subpattern - this = sourceget() - if this is None: - break # end of pattern - - if state.flags & SRE_FLAG_VERBOSE: - # skip whitespace and comments - if this in WHITESPACE: - continue - if this == "#": - while 1: - this = sourceget() - if this in (None, "\n"): - break - continue - - if this and this[0] not in SPECIAL_CHARS: - subpatternappend((LITERAL, ord(this))) - - elif this == "[": - # character set - set = [] - setappend = set.append -## if sourcematch(":"): -## pass # handle character classes - if sourcematch("^"): - setappend((NEGATE, None)) - # check remaining characters - start = set[:] - while 1: - this = sourceget() - if this == "]" and set != start: - break - elif this and this[0] == "\\": - code1 = _class_escape(source, this) - elif this: - code1 = LITERAL, ord(this) - else: - raise error, "unexpected end of regular expression" - if sourcematch("-"): - # potential range - this = sourceget() - if this == "]": - if code1[0] is IN: - code1 = code1[1][0] - setappend(code1) - setappend((LITERAL, ord("-"))) - break - elif this: - if this[0] == "\\": - code2 = _class_escape(source, this) - else: - code2 = LITERAL, ord(this) - if code1[0] != LITERAL or code2[0] != LITERAL: - raise error, "bad character range" - lo = code1[1] - hi = code2[1] - if hi < lo: - raise error, "bad character range" - setappend((RANGE, (lo, hi))) - else: - raise error, "unexpected end of regular expression" - else: - if code1[0] is IN: - code1 = code1[1][0] - setappend(code1) - - # XXX: should move set optimization to compiler! 
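# (Editor's aside, not part of the original file: the branch below is the
# optimization the XXX note refers to. A one-member class such as r"[a]"
# is emitted as a bare LITERAL, a negated one-member class such as
# r"[^a]" becomes NOT_LITERAL, and only genuinely multi-member classes
# keep the general IN opcode with the full member list.)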
- if _len(set)==1 and set[0][0] is LITERAL: - subpatternappend(set[0]) # optimization - elif _len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL: - subpatternappend((NOT_LITERAL, set[1][1])) # optimization - else: - # XXX: should add charmap optimization here - subpatternappend((IN, set)) - - elif this and this[0] in REPEAT_CHARS: - # repeat previous item - if this == "?": - min, max = 0, 1 - elif this == "*": - min, max = 0, MAXREPEAT - - elif this == "+": - min, max = 1, MAXREPEAT - elif this == "{": - if source.next == "}": - subpatternappend((LITERAL, ord(this))) - continue - here = source.tell() - min, max = 0, MAXREPEAT - lo = hi = "" - while source.next in DIGITS: - lo = lo + source.get() - if sourcematch(","): - while source.next in DIGITS: - hi = hi + sourceget() - else: - hi = lo - if not sourcematch("}"): - subpatternappend((LITERAL, ord(this))) - source.seek(here) - continue - if lo: - min = int(lo) - if hi: - max = int(hi) - if max < min: - raise error, "bad repeat interval" - else: - raise error, "not supported" - # figure out which item to repeat - if subpattern: - item = subpattern[-1:] - else: - item = None - if not item or (_len(item) == 1 and item[0][0] == AT): - raise error, "nothing to repeat" - if item[0][0] in REPEATCODES: - raise error, "multiple repeat" - if sourcematch("?"): - subpattern[-1] = (MIN_REPEAT, (min, max, item)) - else: - subpattern[-1] = (MAX_REPEAT, (min, max, item)) - - elif this == ".": - subpatternappend((ANY, None)) - - elif this == "(": - group = 1 - name = None - condgroup = None - if sourcematch("?"): - group = 0 - # options - if sourcematch("P"): - # python extensions - if sourcematch("<"): - # named group: skip forward to end of name - name = "" - while 1: - char = sourceget() - if char is None: - raise error, "unterminated name" - if char == ">": - break - name = name + char - group = 1 - if not isname(name): - raise error, "bad character in group name" - elif sourcematch("="): - # named backreference - name = "" - while 1: - char = sourceget() - if char is None: - raise error, "unterminated name" - if char == ")": - break - name = name + char - if not isname(name): - raise error, "bad character in group name" - gid = state.groupdict.get(name) - if gid is None: - raise error, "unknown group name" - subpatternappend((GROUPREF, gid)) - continue - else: - char = sourceget() - if char is None: - raise error, "unexpected end of pattern" - raise error, "unknown specifier: ?P%s" % char - elif sourcematch(":"): - # non-capturing group - group = 2 - elif sourcematch("#"): - # comment - while 1: - if source.next is None or source.next == ")": - break - sourceget() - if not sourcematch(")"): - raise error, "unbalanced parenthesis" - continue - elif source.next in ASSERTCHARS: - # lookahead assertions - char = sourceget() - dir = 1 - if char == "<": - if source.next not in LOOKBEHINDASSERTCHARS: - raise error, "syntax error" - dir = -1 # lookbehind - char = sourceget() - p = _parse_sub(source, state) - if not sourcematch(")"): - raise error, "unbalanced parenthesis" - if char == "=": - subpatternappend((ASSERT, (dir, p))) - else: - subpatternappend((ASSERT_NOT, (dir, p))) - continue - elif sourcematch("("): - # conditional backreference group - condname = "" - while 1: - char = sourceget() - if char is None: - raise error, "unterminated name" - if char == ")": - break - condname = condname + char - group = 2 - if isname(condname): - condgroup = state.groupdict.get(condname) - if condgroup is None: - raise error, "unknown group name" - else: - 
try: - condgroup = int(condname) - except ValueError: - raise error, "bad character in group name" - else: - # flags - if not source.next in FLAGS: - raise error, "unexpected end of pattern" - while source.next in FLAGS: - state.flags = state.flags | FLAGS[sourceget()] - if group: - # parse group contents - if group == 2: - # anonymous group - group = None - else: - group = state.opengroup(name) - if condgroup: - p = _parse_sub_cond(source, state, condgroup) - else: - p = _parse_sub(source, state) - if not sourcematch(")"): - raise error, "unbalanced parenthesis" - if group is not None: - state.closegroup(group) - subpatternappend((SUBPATTERN, (group, p))) - else: - while 1: - char = sourceget() - if char is None: - raise error, "unexpected end of pattern" - if char == ")": - break - raise error, "unknown extension" - - elif this == "^": - subpatternappend((AT, AT_BEGINNING)) - - elif this == "$": - subpattern.append((AT, AT_END)) - - elif this and this[0] == "\\": - code = _escape(source, this, state) - subpatternappend(code) - - else: - raise error, "parser error" - - return subpattern - -def parse(str, flags=0, pattern=None): - # parse 're' pattern into list of (opcode, argument) tuples - - source = Tokenizer(str) - - if pattern is None: - pattern = Pattern() - pattern.flags = flags - pattern.str = str - - p = _parse_sub(source, pattern, 0) - - tail = source.get() - if tail == ")": - raise error, "unbalanced parenthesis" - elif tail: - raise error, "bogus characters at end of regular expression" - - if flags & SRE_FLAG_DEBUG: - p.dump() - - if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE: - # the VERBOSE flag was switched on inside the pattern. to be - # on the safe side, we'll parse the whole thing again... - return parse(str, p.pattern.flags) - - return p - -def parse_template(source, pattern): - # parse 're' replacement string into list of literals and - # group references - s = Tokenizer(source) - sget = s.get - p = [] - a = p.append - def literal(literal, p=p, pappend=a): - if p and p[-1][0] is LITERAL: - p[-1] = LITERAL, p[-1][1] + literal - else: - pappend((LITERAL, literal)) - sep = source[:0] - if type(sep) is type(""): - makechar = chr - else: - makechar = unichr - while 1: - this = sget() - if this is None: - break # end of replacement string - if this and this[0] == "\\": - # group - c = this[1:2] - if c == "g": - name = "" - if s.match("<"): - while 1: - char = sget() - if char is None: - raise error, "unterminated group name" - if char == ">": - break - name = name + char - if not name: - raise error, "bad group name" - try: - index = int(name) - if index < 0: - raise error, "negative group number" - except ValueError: - if not isname(name): - raise error, "bad character in group name" - try: - index = pattern.groupindex[name] - except KeyError: - raise IndexError, "unknown group name" - a((MARK, index)) - elif c == "0": - if s.next in OCTDIGITS: - this = this + sget() - if s.next in OCTDIGITS: - this = this + sget() - literal(makechar(int(this[1:], 8) & 0xff)) - elif c in DIGITS: - isoctal = False - if s.next in DIGITS: - this = this + sget() - if (c in OCTDIGITS and this[2] in OCTDIGITS and - s.next in OCTDIGITS): - this = this + sget() - isoctal = True - literal(makechar(int(this[1:], 8) & 0xff)) - if not isoctal: - a((MARK, int(this[1:]))) - else: - try: - this = makechar(ESCAPES[this][1]) - except KeyError: - pass - literal(this) - else: - literal(this) - # convert template to groups and literals lists - i = 0 - groups = [] - groupsappend = 
groups.append - literals = [None] * len(p) - for c, s in p: - if c is MARK: - groupsappend((i, s)) - # literal[i] is already None - else: - literals[i] = s - i = i + 1 - return groups, literals - -def expand_template(template, match): - g = match.group - sep = match.string[:0] - groups, literals = template - literals = literals[:] - try: - for index, group in groups: - literals[index] = s = g(group) - if s is None: - raise error, "unmatched group" - except IndexError: - raise error, "invalid group reference" - return sep.join(literals) diff --git a/icarus-miner/data/usr/lib/python2.6/sre_parse.pyc b/icarus-miner/data/usr/lib/python2.6/sre_parse.pyc deleted file mode 100644 index c1a9ece..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/sre_parse.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/stat.py b/icarus-miner/data/usr/lib/python2.6/stat.py deleted file mode 100644 index 4a9d30c..0000000 --- a/icarus-miner/data/usr/lib/python2.6/stat.py +++ /dev/null @@ -1,94 +0,0 @@ -"""Constants/functions for interpreting results of os.stat() and os.lstat(). - -Suggested usage: from stat import * -""" - -# Indices for stat struct members in the tuple returned by os.stat() - -ST_MODE = 0 -ST_INO = 1 -ST_DEV = 2 -ST_NLINK = 3 -ST_UID = 4 -ST_GID = 5 -ST_SIZE = 6 -ST_ATIME = 7 -ST_MTIME = 8 -ST_CTIME = 9 - -# Extract bits from the mode - -def S_IMODE(mode): - return mode & 07777 - -def S_IFMT(mode): - return mode & 0170000 - -# Constants used as S_IFMT() for various file types -# (not all are implemented on all systems) - -S_IFDIR = 0040000 -S_IFCHR = 0020000 -S_IFBLK = 0060000 -S_IFREG = 0100000 -S_IFIFO = 0010000 -S_IFLNK = 0120000 -S_IFSOCK = 0140000 - -# Functions to test for each file type - -def S_ISDIR(mode): - return S_IFMT(mode) == S_IFDIR - -def S_ISCHR(mode): - return S_IFMT(mode) == S_IFCHR - -def S_ISBLK(mode): - return S_IFMT(mode) == S_IFBLK - -def S_ISREG(mode): - return S_IFMT(mode) == S_IFREG - -def S_ISFIFO(mode): - return S_IFMT(mode) == S_IFIFO - -def S_ISLNK(mode): - return S_IFMT(mode) == S_IFLNK - -def S_ISSOCK(mode): - return S_IFMT(mode) == S_IFSOCK - -# Names for permission bits - -S_ISUID = 04000 -S_ISGID = 02000 -S_ENFMT = S_ISGID -S_ISVTX = 01000 -S_IREAD = 00400 -S_IWRITE = 00200 -S_IEXEC = 00100 -S_IRWXU = 00700 -S_IRUSR = 00400 -S_IWUSR = 00200 -S_IXUSR = 00100 -S_IRWXG = 00070 -S_IRGRP = 00040 -S_IWGRP = 00020 -S_IXGRP = 00010 -S_IRWXO = 00007 -S_IROTH = 00004 -S_IWOTH = 00002 -S_IXOTH = 00001 - -# Names for file flags - -UF_NODUMP = 0x00000001 -UF_IMMUTABLE = 0x00000002 -UF_APPEND = 0x00000004 -UF_OPAQUE = 0x00000008 -UF_NOUNLINK = 0x00000010 -SF_ARCHIVED = 0x00010000 -SF_IMMUTABLE = 0x00020000 -SF_APPEND = 0x00040000 -SF_NOUNLINK = 0x00100000 -SF_SNAPSHOT = 0x00200000 diff --git a/icarus-miner/data/usr/lib/python2.6/stat.pyc b/icarus-miner/data/usr/lib/python2.6/stat.pyc deleted file mode 100644 index a31bffa..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/stat.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/string.py b/icarus-miner/data/usr/lib/python2.6/string.py deleted file mode 100644 index 6faa1f0..0000000 --- a/icarus-miner/data/usr/lib/python2.6/string.py +++ /dev/null @@ -1,642 +0,0 @@ -"""A collection of string operations (most are no longer used). - -Warning: most of the code you see here isn't normally used nowadays. -Beginning with Python 1.6, many of these functions are implemented as -methods on the standard string object. 
They used to be implemented by -a built-in module called strop, but strop is now obsolete itself. - -Public module variables: - -whitespace -- a string containing all characters considered whitespace -lowercase -- a string containing all characters considered lowercase letters -uppercase -- a string containing all characters considered uppercase letters -letters -- a string containing all characters considered letters -digits -- a string containing all characters considered decimal digits -hexdigits -- a string containing all characters considered hexadecimal digits -octdigits -- a string containing all characters considered octal digits -punctuation -- a string containing all characters considered punctuation -printable -- a string containing all characters considered printable - -""" - -# Some strings for ctype-style character classification -whitespace = ' \t\n\r\v\f' -lowercase = 'abcdefghijklmnopqrstuvwxyz' -uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' -letters = lowercase + uppercase -ascii_lowercase = lowercase -ascii_uppercase = uppercase -ascii_letters = ascii_lowercase + ascii_uppercase -digits = '0123456789' -hexdigits = digits + 'abcdef' + 'ABCDEF' -octdigits = '01234567' -punctuation = """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~""" -printable = digits + letters + punctuation + whitespace - -# Case conversion helpers -# Use str to convert Unicode literal in case of -U -l = map(chr, xrange(256)) -_idmap = str('').join(l) -del l - -# Functions which aren't available as string methods. - -# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def". -def capwords(s, sep=None): - """capwords(s [,sep]) -> string - - Split the argument into words using split, capitalize each - word using capitalize, and join the capitalized words using - join. If the optional second argument sep is absent or None, - runs of whitespace characters are replaced by a single space - and leading and trailing whitespace are removed, otherwise - sep is used to split and join the words. - - """ - return (sep or ' ').join(x.capitalize() for x in s.split(sep)) - - -# Construct a translation string -_idmapL = None -def maketrans(fromstr, tostr): - """maketrans(frm, to) -> string - - Return a translation table (a string of 256 bytes long) - suitable for use in string.translate. The strings frm and to - must be of the same length. - - """ - if len(fromstr) != len(tostr): - raise ValueError, "maketrans arguments must have same length" - global _idmapL - if not _idmapL: - _idmapL = list(_idmap) - L = _idmapL[:] - fromstr = map(ord, fromstr) - for i in range(len(fromstr)): - L[fromstr[i]] = tostr[i] - return ''.join(L) - - - -#################################################################### -import re as _re - -class _multimap: - """Helper class for combining multiple mappings. - - Used by .{safe_,}substitute() to combine the mapping and keyword - arguments. 
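
    A minimal illustration (editor's aside, assuming two plain dicts):

    >>> m = _multimap({'a': 1}, {'a': 9, 'b': 2})
    >>> m['a'], m['b']
    (1, 2)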
- """ - def __init__(self, primary, secondary): - self._primary = primary - self._secondary = secondary - - def __getitem__(self, key): - try: - return self._primary[key] - except KeyError: - return self._secondary[key] - - -class _TemplateMetaclass(type): - pattern = r""" - %(delim)s(?: - (?P%(delim)s) | # Escape sequence of two delimiters - (?P%(id)s) | # delimiter and a Python identifier - {(?P%(id)s)} | # delimiter and a braced identifier - (?P) # Other ill-formed delimiter exprs - ) - """ - - def __init__(cls, name, bases, dct): - super(_TemplateMetaclass, cls).__init__(name, bases, dct) - if 'pattern' in dct: - pattern = cls.pattern - else: - pattern = _TemplateMetaclass.pattern % { - 'delim' : _re.escape(cls.delimiter), - 'id' : cls.idpattern, - } - cls.pattern = _re.compile(pattern, _re.IGNORECASE | _re.VERBOSE) - - -class Template: - """A string class for supporting $-substitutions.""" - __metaclass__ = _TemplateMetaclass - - delimiter = '$' - idpattern = r'[_a-z][_a-z0-9]*' - - def __init__(self, template): - self.template = template - - # Search for $$, $identifier, ${identifier}, and any bare $'s - - def _invalid(self, mo): - i = mo.start('invalid') - lines = self.template[:i].splitlines(True) - if not lines: - colno = 1 - lineno = 1 - else: - colno = i - len(''.join(lines[:-1])) - lineno = len(lines) - raise ValueError('Invalid placeholder in string: line %d, col %d' % - (lineno, colno)) - - def substitute(self, *args, **kws): - if len(args) > 1: - raise TypeError('Too many positional arguments') - if not args: - mapping = kws - elif kws: - mapping = _multimap(kws, args[0]) - else: - mapping = args[0] - # Helper function for .sub() - def convert(mo): - # Check the most common path first. - named = mo.group('named') or mo.group('braced') - if named is not None: - val = mapping[named] - # We use this idiom instead of str() because the latter will - # fail if val is a Unicode containing non-ASCII characters. - return '%s' % (val,) - if mo.group('escaped') is not None: - return self.delimiter - if mo.group('invalid') is not None: - self._invalid(mo) - raise ValueError('Unrecognized named group in pattern', - self.pattern) - return self.pattern.sub(convert, self.template) - - def safe_substitute(self, *args, **kws): - if len(args) > 1: - raise TypeError('Too many positional arguments') - if not args: - mapping = kws - elif kws: - mapping = _multimap(kws, args[0]) - else: - mapping = args[0] - # Helper function for .sub() - def convert(mo): - named = mo.group('named') - if named is not None: - try: - # We use this idiom instead of str() because the latter - # will fail if val is a Unicode containing non-ASCII - return '%s' % (mapping[named],) - except KeyError: - return self.delimiter + named - braced = mo.group('braced') - if braced is not None: - try: - return '%s' % (mapping[braced],) - except KeyError: - return self.delimiter + '{' + braced + '}' - if mo.group('escaped') is not None: - return self.delimiter - if mo.group('invalid') is not None: - return self.delimiter - raise ValueError('Unrecognized named group in pattern', - self.pattern) - return self.pattern.sub(convert, self.template) - - - -#################################################################### -# NOTE: Everything below here is deprecated. Use string methods instead. -# This stuff will go away in Python 3.0. 
- -# Backward compatible names for exceptions -index_error = ValueError -atoi_error = ValueError -atof_error = ValueError -atol_error = ValueError - -# convert UPPER CASE letters to lower case -def lower(s): - """lower(s) -> string - - Return a copy of the string s converted to lowercase. - - """ - return s.lower() - -# Convert lower case letters to UPPER CASE -def upper(s): - """upper(s) -> string - - Return a copy of the string s converted to uppercase. - - """ - return s.upper() - -# Swap lower case letters and UPPER CASE -def swapcase(s): - """swapcase(s) -> string - - Return a copy of the string s with upper case characters - converted to lowercase and vice versa. - - """ - return s.swapcase() - -# Strip leading and trailing tabs and spaces -def strip(s, chars=None): - """strip(s [,chars]) -> string - - Return a copy of the string s with leading and trailing - whitespace removed. - If chars is given and not None, remove characters in chars instead. - If chars is unicode, S will be converted to unicode before stripping. - - """ - return s.strip(chars) - -# Strip leading tabs and spaces -def lstrip(s, chars=None): - """lstrip(s [,chars]) -> string - - Return a copy of the string s with leading whitespace removed. - If chars is given and not None, remove characters in chars instead. - - """ - return s.lstrip(chars) - -# Strip trailing tabs and spaces -def rstrip(s, chars=None): - """rstrip(s [,chars]) -> string - - Return a copy of the string s with trailing whitespace removed. - If chars is given and not None, remove characters in chars instead. - - """ - return s.rstrip(chars) - - -# Split a string into a list of space/tab-separated words -def split(s, sep=None, maxsplit=-1): - """split(s [,sep [,maxsplit]]) -> list of strings - - Return a list of the words in the string s, using sep as the - delimiter string. If maxsplit is given, splits at no more than - maxsplit places (resulting in at most maxsplit+1 words). If sep - is not specified or is None, any whitespace string is a separator. - - (split and splitfields are synonymous) - - """ - return s.split(sep, maxsplit) -splitfields = split - -# Split a string into a list of space/tab-separated words -def rsplit(s, sep=None, maxsplit=-1): - """rsplit(s [,sep [,maxsplit]]) -> list of strings - - Return a list of the words in the string s, using sep as the - delimiter string, starting at the end of the string and working - to the front. If maxsplit is given, at most maxsplit splits are - done. If sep is not specified or is None, any whitespace string - is a separator. - """ - return s.rsplit(sep, maxsplit) - -# Join fields with optional separator -def join(words, sep = ' '): - """join(list [,sep]) -> string - - Return a string composed of the words in list, with - intervening occurrences of sep. The default separator is a - single space. - - (joinfields and join are synonymous) - - """ - return sep.join(words) -joinfields = join - -# Find substring, raise exception if not found -def index(s, *args): - """index(s, sub [,start [,end]]) -> int - - Like find but raises ValueError when the substring is not found. - - """ - return s.index(*args) - -# Find last substring, raise exception if not found -def rindex(s, *args): - """rindex(s, sub [,start [,end]]) -> int - - Like rfind but raises ValueError when the substring is not found. 
- - """ - return s.rindex(*args) - -# Count non-overlapping occurrences of substring -def count(s, *args): - """count(s, sub[, start[,end]]) -> int - - Return the number of occurrences of substring sub in string - s[start:end]. Optional arguments start and end are - interpreted as in slice notation. - - """ - return s.count(*args) - -# Find substring, return -1 if not found -def find(s, *args): - """find(s, sub [,start [,end]]) -> in - - Return the lowest index in s where substring sub is found, - such that sub is contained within s[start,end]. Optional - arguments start and end are interpreted as in slice notation. - - Return -1 on failure. - - """ - return s.find(*args) - -# Find last substring, return -1 if not found -def rfind(s, *args): - """rfind(s, sub [,start [,end]]) -> int - - Return the highest index in s where substring sub is found, - such that sub is contained within s[start,end]. Optional - arguments start and end are interpreted as in slice notation. - - Return -1 on failure. - - """ - return s.rfind(*args) - -# for a bit of speed -_float = float -_int = int -_long = long - -# Convert string to float -def atof(s): - """atof(s) -> float - - Return the floating point number represented by the string s. - - """ - return _float(s) - - -# Convert string to integer -def atoi(s , base=10): - """atoi(s [,base]) -> int - - Return the integer represented by the string s in the given - base, which defaults to 10. The string s must consist of one - or more digits, possibly preceded by a sign. If base is 0, it - is chosen from the leading characters of s, 0 for octal, 0x or - 0X for hexadecimal. If base is 16, a preceding 0x or 0X is - accepted. - - """ - return _int(s, base) - - -# Convert string to long integer -def atol(s, base=10): - """atol(s [,base]) -> long - - Return the long integer represented by the string s in the - given base, which defaults to 10. The string s must consist - of one or more digits, possibly preceded by a sign. If base - is 0, it is chosen from the leading characters of s, 0 for - octal, 0x or 0X for hexadecimal. If base is 16, a preceding - 0x or 0X is accepted. A trailing L or l is not accepted, - unless base is 0. - - """ - return _long(s, base) - - -# Left-justify a string -def ljust(s, width, *args): - """ljust(s, width[, fillchar]) -> string - - Return a left-justified version of s, in a field of the - specified width, padded with spaces as needed. The string is - never truncated. If specified the fillchar is used instead of spaces. - - """ - return s.ljust(width, *args) - -# Right-justify a string -def rjust(s, width, *args): - """rjust(s, width[, fillchar]) -> string - - Return a right-justified version of s, in a field of the - specified width, padded with spaces as needed. The string is - never truncated. If specified the fillchar is used instead of spaces. - - """ - return s.rjust(width, *args) - -# Center a string -def center(s, width, *args): - """center(s, width[, fillchar]) -> string - - Return a center version of s, in a field of the specified - width. padded with spaces as needed. The string is never - truncated. If specified the fillchar is used instead of spaces. - - """ - return s.center(width, *args) - -# Zero-fill a number, e.g., (12, 3) --> '012' and (-3, 3) --> '-03' -# Decadent feature: the argument may be a string or a number -# (Use of this is deprecated; it should be a string as with ljust c.s.) 
-def zfill(x, width): - """zfill(x, width) -> string - - Pad a numeric string x with zeros on the left, to fill a field - of the specified width. The string x is never truncated. - - """ - if not isinstance(x, basestring): - x = repr(x) - return x.zfill(width) - -# Expand tabs in a string. -# Doesn't take non-printing chars into account, but does understand \n. -def expandtabs(s, tabsize=8): - """expandtabs(s [,tabsize]) -> string - - Return a copy of the string s with all tab characters replaced - by the appropriate number of spaces, depending on the current - column, and the tabsize (default 8). - - """ - return s.expandtabs(tabsize) - -# Character translation through look-up table. -def translate(s, table, deletions=""): - """translate(s,table [,deletions]) -> string - - Return a copy of the string s, where all characters occurring - in the optional argument deletions are removed, and the - remaining characters have been mapped through the given - translation table, which must be a string of length 256. The - deletions argument is not allowed for Unicode strings. - - """ - if deletions or table is None: - return s.translate(table, deletions) - else: - # Add s[:0] so that if s is Unicode and table is an 8-bit string, - # table is converted to Unicode. This means that table *cannot* - # be a dictionary -- for that feature, use u.translate() directly. - return s.translate(table + s[:0]) - -# Capitalize a string, e.g. "aBc dEf" -> "Abc def". -def capitalize(s): - """capitalize(s) -> string - - Return a copy of the string s with only its first character - capitalized. - - """ - return s.capitalize() - -# Substring replacement (global) -def replace(s, old, new, maxsplit=-1): - """replace (str, old, new[, maxsplit]) -> string - - Return a copy of string str with all occurrences of substring - old replaced by new. If the optional argument maxsplit is - given, only the first maxsplit occurrences are replaced. - - """ - return s.replace(old, new, maxsplit) - - -# Try importing optional built-in module "strop" -- if it exists, -# it redefines some string operations that are 100-1000 times faster. -# It also defines values for whitespace, lowercase and uppercase -# that match 's definitions. - -try: - from strop import maketrans, lowercase, uppercase, whitespace - letters = lowercase + uppercase -except ImportError: - pass # Use the original versions - -######################################################################## -# the Formatter class -# see PEP 3101 for details and purpose of this class - -# The hard parts are reused from the C implementation. They're exposed as "_" -# prefixed methods of str and unicode. - -# The overall parser is implemented in str._formatter_parser. 
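# (Editor's aside, not part of the original file: on Python 2.6 this
# internal hook can be inspected directly; the tuples it yields are
# (literal_text, field_name, format_spec, conversion), e.g.
#
#   >>> list('x={0!r:>8}'._formatter_parser())
#   [('x=', '0', '>8', 'r')]
#
# which Formatter.parse() below simply passes through.)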
-# The field name parser is implemented in str._formatter_field_name_split - -class Formatter(object): - def format(self, format_string, *args, **kwargs): - return self.vformat(format_string, args, kwargs) - - def vformat(self, format_string, args, kwargs): - used_args = set() - result = self._vformat(format_string, args, kwargs, used_args, 2) - self.check_unused_args(used_args, args, kwargs) - return result - - def _vformat(self, format_string, args, kwargs, used_args, recursion_depth): - if recursion_depth < 0: - raise ValueError('Max string recursion exceeded') - result = [] - for literal_text, field_name, format_spec, conversion in \ - self.parse(format_string): - - # output the literal text - if literal_text: - result.append(literal_text) - - # if there's a field, output it - if field_name is not None: - # this is some markup, find the object and do - # the formatting - - # given the field_name, find the object it references - # and the argument it came from - obj, arg_used = self.get_field(field_name, args, kwargs) - used_args.add(arg_used) - - # do any conversion on the resulting object - obj = self.convert_field(obj, conversion) - - # expand the format spec, if needed - format_spec = self._vformat(format_spec, args, kwargs, - used_args, recursion_depth-1) - - # format the object and append to the result - result.append(self.format_field(obj, format_spec)) - - return ''.join(result) - - - def get_value(self, key, args, kwargs): - if isinstance(key, (int, long)): - return args[key] - else: - return kwargs[key] - - - def check_unused_args(self, used_args, args, kwargs): - pass - - - def format_field(self, value, format_spec): - return format(value, format_spec) - - - def convert_field(self, value, conversion): - # do any conversion on the resulting object - if conversion == 'r': - return repr(value) - elif conversion == 's': - return str(value) - elif conversion is None: - return value - raise ValueError("Unknown converion specifier {0!s}".format(conversion)) - - - # returns an iterable that contains tuples of the form: - # (literal_text, field_name, format_spec, conversion) - # literal_text can be zero length - # field_name can be None, in which case there's no - # object to format and output - # if field_name is not None, it is looked up, formatted - # with format_spec and conversion and then used - def parse(self, format_string): - return format_string._formatter_parser() - - - # given a field_name, find the object it references. - # field_name: the field being looked up, e.g. 
"0.name" - # or "lookup[3]" - # used_args: a set of which args have been used - # args, kwargs: as passed in to vformat - def get_field(self, field_name, args, kwargs): - first, rest = field_name._formatter_field_name_split() - - obj = self.get_value(first, args, kwargs) - - # loop through the rest of the field_name, doing - # getattr or getitem as needed - for is_attr, i in rest: - if is_attr: - obj = getattr(obj, i) - else: - obj = obj[i] - - return obj, first diff --git a/icarus-miner/data/usr/lib/python2.6/string.pyc b/icarus-miner/data/usr/lib/python2.6/string.pyc deleted file mode 100644 index c49c90a..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/string.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/struct.py b/icarus-miner/data/usr/lib/python2.6/struct.py deleted file mode 100644 index 3784c05..0000000 --- a/icarus-miner/data/usr/lib/python2.6/struct.py +++ /dev/null @@ -1,2 +0,0 @@ -from _struct import * -from _struct import _clearcache diff --git a/icarus-miner/data/usr/lib/python2.6/struct.pyc b/icarus-miner/data/usr/lib/python2.6/struct.pyc deleted file mode 100644 index 10ab8ba..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/struct.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/textwrap.py b/icarus-miner/data/usr/lib/python2.6/textwrap.py deleted file mode 100644 index 0a65303..0000000 --- a/icarus-miner/data/usr/lib/python2.6/textwrap.py +++ /dev/null @@ -1,417 +0,0 @@ -"""Text wrapping and filling. -""" - -# Copyright (C) 1999-2001 Gregory P. Ward. -# Copyright (C) 2002, 2003 Python Software Foundation. -# Written by Greg Ward - -__revision__ = "$Id: textwrap.py 68135 2009-01-01 15:46:10Z georg.brandl $" - -import string, re - -# Do the right thing with boolean values for all known Python versions -# (so this module can be copied to projects that don't depend on Python -# 2.3, e.g. Optik and Docutils) by uncommenting the block of code below. -#try: -# True, False -#except NameError: -# (True, False) = (1, 0) - -__all__ = ['TextWrapper', 'wrap', 'fill', 'dedent'] - -# Hardcode the recognized whitespace characters to the US-ASCII -# whitespace characters. The main reason for doing this is that in -# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales -# that character winds up in string.whitespace. Respecting -# string.whitespace in those cases would 1) make textwrap treat 0xa0 the -# same as any other whitespace char, which is clearly wrong (it's a -# *non-breaking* space), 2) possibly cause problems with Unicode, -# since 0xa0 is not in range(128). -_whitespace = '\t\n\x0b\x0c\r ' - -class TextWrapper: - """ - Object for wrapping/filling text. The public interface consists of - the wrap() and fill() methods; the other methods are just there for - subclasses to override in order to tweak the default behaviour. - If you want to completely replace the main wrapping algorithm, - you'll probably have to override _wrap_chunks(). - - Several instance attributes control various aspects of wrapping: - width (default: 70) - the maximum width of wrapped lines (unless break_long_words - is false) - initial_indent (default: "") - string that will be prepended to the first line of wrapped - output. Counts towards the line's width. - subsequent_indent (default: "") - string that will be prepended to all lines save the first - of wrapped output; also counts towards each line's width. - expand_tabs (default: true) - Expand tabs in input text to spaces before further processing. 
- Each tab will become 1 .. 8 spaces, depending on its position in - its line. If false, each tab is treated as a single character. - replace_whitespace (default: true) - Replace all whitespace characters in the input text by spaces - after tab expansion. Note that if expand_tabs is false and - replace_whitespace is true, every tab will be converted to a - single space! - fix_sentence_endings (default: false) - Ensure that sentence-ending punctuation is always followed - by two spaces. Off by default because the algorithm is - (unavoidably) imperfect. - break_long_words (default: true) - Break words longer than 'width'. If false, those words will not - be broken, and some lines might be longer than 'width'. - break_on_hyphens (default: true) - Allow breaking hyphenated words. If true, wrapping will occur - preferably on whitespaces and right after hyphens part of - compound words. - drop_whitespace (default: true) - Drop leading and trailing whitespace from lines. - """ - - whitespace_trans = string.maketrans(_whitespace, ' ' * len(_whitespace)) - - unicode_whitespace_trans = {} - uspace = ord(u' ') - for x in map(ord, _whitespace): - unicode_whitespace_trans[x] = uspace - - # This funky little regex is just the trick for splitting - # text up into word-wrappable chunks. E.g. - # "Hello there -- you goof-ball, use the -b option!" - # splits into - # Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option! - # (after stripping out empty strings). - wordsep_re = re.compile( - r'(\s+|' # any whitespace - r'[^\s\w]*\w+[^0-9\W]-(?=\w+[^0-9\W])|' # hyphenated words - r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash - - # This less funky little regex just split on recognized spaces. E.g. - # "Hello there -- you goof-ball, use the -b option!" - # splits into - # Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/ - wordsep_simple_re = re.compile(r'(\s+)') - - # XXX this is not locale- or charset-aware -- string.lowercase - # is US-ASCII only (and therefore English-only) - sentence_end_re = re.compile(r'[%s]' # lowercase letter - r'[\.\!\?]' # sentence-ending punct. - r'[\"\']?' # optional end-of-quote - r'\Z' # end of chunk - % string.lowercase) - - - def __init__(self, - width=70, - initial_indent="", - subsequent_indent="", - expand_tabs=True, - replace_whitespace=True, - fix_sentence_endings=False, - break_long_words=True, - drop_whitespace=True, - break_on_hyphens=True): - self.width = width - self.initial_indent = initial_indent - self.subsequent_indent = subsequent_indent - self.expand_tabs = expand_tabs - self.replace_whitespace = replace_whitespace - self.fix_sentence_endings = fix_sentence_endings - self.break_long_words = break_long_words - self.drop_whitespace = drop_whitespace - self.break_on_hyphens = break_on_hyphens - - # recompile the regexes for Unicode mode -- done in this clumsy way for - # backwards compatibility because it's rather common to monkey-patch - # the TextWrapper class' wordsep_re attribute. - self.wordsep_re_uni = re.compile(self.wordsep_re.pattern, re.U) - self.wordsep_simple_re_uni = re.compile( - self.wordsep_simple_re.pattern, re.U) - - - # -- Private methods ----------------------------------------------- - # (possibly useful for subclasses to override) - - def _munge_whitespace(self, text): - """_munge_whitespace(text : string) -> string - - Munge whitespace in text: expand tabs and convert all other - whitespace characters to spaces. Eg. " foo\tbar\n\nbaz" - becomes " foo bar baz". 
- """ - if self.expand_tabs: - text = text.expandtabs() - if self.replace_whitespace: - if isinstance(text, str): - text = text.translate(self.whitespace_trans) - elif isinstance(text, unicode): - text = text.translate(self.unicode_whitespace_trans) - return text - - - def _split(self, text): - """_split(text : string) -> [string] - - Split the text to wrap into indivisible chunks. Chunks are - not quite the same as words; see wrap_chunks() for full - details. As an example, the text - Look, goof-ball -- use the -b option! - breaks into the following chunks: - 'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ', - 'use', ' ', 'the', ' ', '-b', ' ', 'option!' - if break_on_hyphens is True, or in: - 'Look,', ' ', 'goof-ball', ' ', '--', ' ', - 'use', ' ', 'the', ' ', '-b', ' ', option!' - otherwise. - """ - if isinstance(text, unicode): - if self.break_on_hyphens: - pat = self.wordsep_re_uni - else: - pat = self.wordsep_simple_re_uni - else: - if self.break_on_hyphens: - pat = self.wordsep_re - else: - pat = self.wordsep_simple_re - chunks = pat.split(text) - chunks = filter(None, chunks) # remove empty chunks - return chunks - - def _fix_sentence_endings(self, chunks): - """_fix_sentence_endings(chunks : [string]) - - Correct for sentence endings buried in 'chunks'. Eg. when the - original text contains "... foo.\nBar ...", munge_whitespace() - and split() will convert that to [..., "foo.", " ", "Bar", ...] - which has one too few spaces; this method simply changes the one - space to two. - """ - i = 0 - pat = self.sentence_end_re - while i < len(chunks)-1: - if chunks[i+1] == " " and pat.search(chunks[i]): - chunks[i+1] = " " - i += 2 - else: - i += 1 - - def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width): - """_handle_long_word(chunks : [string], - cur_line : [string], - cur_len : int, width : int) - - Handle a chunk of text (most likely a word, not whitespace) that - is too long to fit in any line. - """ - # Figure out when indent is larger than the specified width, and make - # sure at least one character is stripped off on every pass - if width < 1: - space_left = 1 - else: - space_left = width - cur_len - - # If we're allowed to break long words, then do so: put as much - # of the next chunk onto the current line as will fit. - if self.break_long_words: - cur_line.append(reversed_chunks[-1][:space_left]) - reversed_chunks[-1] = reversed_chunks[-1][space_left:] - - # Otherwise, we have to preserve the long word intact. Only add - # it to the current line if there's nothing already there -- - # that minimizes how much we violate the width constraint. - elif not cur_line: - cur_line.append(reversed_chunks.pop()) - - # If we're not allowed to break long words, and there's already - # text on the current line, do nothing. Next time through the - # main loop of _wrap_chunks(), we'll wind up here again, but - # cur_len will be zero, so the next line will be entirely - # devoted to the long word that we can't handle right now. - - def _wrap_chunks(self, chunks): - """_wrap_chunks(chunks : [string]) -> [string] - - Wrap a sequence of text chunks and return a list of lines of - length 'self.width' or less. (If 'break_long_words' is false, - some lines may be longer than this.) Chunks correspond roughly - to words and the whitespace between them: each chunk is - indivisible (modulo 'break_long_words'), but a line break can - come between any two chunks. Chunks should not have internal - whitespace; ie. a chunk is either all whitespace or a "word". 
- Whitespace chunks will be removed from the beginning and end of - lines, but apart from that whitespace is preserved. - """ - lines = [] - if self.width <= 0: - raise ValueError("invalid width %r (must be > 0)" % self.width) - - # Arrange in reverse order so items can be efficiently popped - # from a stack of chucks. - chunks.reverse() - - while chunks: - - # Start the list of chunks that will make up the current line. - # cur_len is just the length of all the chunks in cur_line. - cur_line = [] - cur_len = 0 - - # Figure out which static string will prefix this line. - if lines: - indent = self.subsequent_indent - else: - indent = self.initial_indent - - # Maximum width for this line. - width = self.width - len(indent) - - # First chunk on line is whitespace -- drop it, unless this - # is the very beginning of the text (ie. no lines started yet). - if self.drop_whitespace and chunks[-1].strip() == '' and lines: - del chunks[-1] - - while chunks: - l = len(chunks[-1]) - - # Can at least squeeze this chunk onto the current line. - if cur_len + l <= width: - cur_line.append(chunks.pop()) - cur_len += l - - # Nope, this line is full. - else: - break - - # The current line is full, and the next chunk is too big to - # fit on *any* line (not just this one). - if chunks and len(chunks[-1]) > width: - self._handle_long_word(chunks, cur_line, cur_len, width) - - # If the last chunk on this line is all whitespace, drop it. - if self.drop_whitespace and cur_line and cur_line[-1].strip() == '': - del cur_line[-1] - - # Convert current line back to a string and store it in list - # of all lines (return value). - if cur_line: - lines.append(indent + ''.join(cur_line)) - - return lines - - - # -- Public interface ---------------------------------------------- - - def wrap(self, text): - """wrap(text : string) -> [string] - - Reformat the single paragraph in 'text' so it fits in lines of - no more than 'self.width' columns, and return a list of wrapped - lines. Tabs in 'text' are expanded with string.expandtabs(), - and all other whitespace characters (including newline) are - converted to space. - """ - text = self._munge_whitespace(text) - chunks = self._split(text) - if self.fix_sentence_endings: - self._fix_sentence_endings(chunks) - return self._wrap_chunks(chunks) - - def fill(self, text): - """fill(text : string) -> string - - Reformat the single paragraph in 'text' to fit in lines of no - more than 'self.width' columns, and return a new string - containing the entire wrapped paragraph. - """ - return "\n".join(self.wrap(text)) - - -# -- Convenience interface --------------------------------------------- - -def wrap(text, width=70, **kwargs): - """Wrap a single paragraph of text, returning a list of wrapped lines. - - Reformat the single paragraph in 'text' so it fits in lines of no - more than 'width' columns, and return a list of wrapped lines. By - default, tabs in 'text' are expanded with string.expandtabs(), and - all other whitespace characters (including newline) are converted to - space. See TextWrapper class for available keyword args to customize - wrapping behaviour. - """ - w = TextWrapper(width=width, **kwargs) - return w.wrap(text) - -def fill(text, width=70, **kwargs): - """Fill a single paragraph of text, returning a new string. - - Reformat the single paragraph in 'text' to fit in lines of no more - than 'width' columns, and return a new string containing the entire - wrapped paragraph. As with wrap(), tabs are expanded and other - whitespace characters converted to space. 
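
    For example (editor's aside, an illustrative doctest):

    >>> fill("hello  world", width=5)
    'hello\nworld'
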
See TextWrapper class for - available keyword args to customize wrapping behaviour. - """ - w = TextWrapper(width=width, **kwargs) - return w.fill(text) - - -# -- Loosely related functionality ------------------------------------- - -_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE) -_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE) - -def dedent(text): - """Remove any common leading whitespace from every line in `text`. - - This can be used to make triple-quoted strings line up with the left - edge of the display, while still presenting them in the source code - in indented form. - - Note that tabs and spaces are both treated as whitespace, but they - are not equal: the lines " hello" and "\thello" are - considered to have no common leading whitespace. (This behaviour is - new in Python 2.5; older versions of this module incorrectly - expanded tabs before searching for common leading whitespace.) - """ - # Look for the longest leading string of spaces and tabs common to - # all lines. - margin = None - text = _whitespace_only_re.sub('', text) - indents = _leading_whitespace_re.findall(text) - for indent in indents: - if margin is None: - margin = indent - - # Current line more deeply indented than previous winner: - # no change (previous winner is still on top). - elif indent.startswith(margin): - pass - - # Current line consistent with and no deeper than previous winner: - # it's the new winner. - elif margin.startswith(indent): - margin = indent - - # Current line and previous winner have no common whitespace: - # there is no margin. - else: - margin = "" - break - - # sanity check (testing/debugging only) - if 0 and margin: - for line in text.split("\n"): - assert not line or line.startswith(margin), \ - "line = %r, margin = %r" % (line, margin) - - if margin: - text = re.sub(r'(?m)^' + margin, '', text) - return text - -if __name__ == "__main__": - #print dedent("\tfoo\n\tbar") - #print dedent(" \thello there\n \t how are you?") - print dedent("Hello there.\n This is indented.") diff --git a/icarus-miner/data/usr/lib/python2.6/textwrap.pyc b/icarus-miner/data/usr/lib/python2.6/textwrap.pyc deleted file mode 100644 index 4c347e9..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/textwrap.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/threading.py b/icarus-miner/data/usr/lib/python2.6/threading.py deleted file mode 100644 index c1791d7..0000000 --- a/icarus-miner/data/usr/lib/python2.6/threading.py +++ /dev/null @@ -1,960 +0,0 @@ -"""Thread module emulating a subset of Java's threading model.""" - -import sys as _sys - -try: - import thread -except ImportError: - del _sys.modules[__name__] - raise - -import warnings - -from functools import wraps -from time import time as _time, sleep as _sleep -from traceback import format_exc as _format_exc -from collections import deque - -# Note regarding PEP 8 compliant aliases -# This threading model was originally inspired by Java, and inherited -# the convention of camelCase function and method names from that -# language. While those names are not in any imminent danger of being -# deprecated, starting with Python 2.6, the module now provides a -# PEP 8 compliant alias for any such method name. -# Using the new PEP 8 compliant names also facilitates substitution -# with the multiprocessing module, which doesn't provide the old -# Java inspired names. 
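The textwrap module deleted above is self-contained, so its public API is easy to exercise on its own. A minimal usage sketch (hypothetical strings, Python 2, not part of the miner code): wrap() applies the greedy chunk-filling algorithm from _wrap_chunks(), fill() joins the result with newlines, and dedent() strips the common leading margin.

    import textwrap

    para = "The Icarus host wraps long status lines before logging them."
    # wrap(): greedy algorithm -- fill each line with whole chunks until
    # the next chunk would exceed the width.
    print textwrap.wrap(para, width=24)
    # fill() is just "\n".join(wrap(text, ...)).
    print textwrap.fill(para, width=24, initial_indent="  ")
    # dedent() removes the whitespace margin common to every line.
    print textwrap.dedent("    foo\n    bar")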
- - -# Rename some stuff so "from threading import *" is safe -__all__ = ['activeCount', 'active_count', 'Condition', 'currentThread', - 'current_thread', 'enumerate', 'Event', - 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread', - 'Timer', 'setprofile', 'settrace', 'local', 'stack_size'] - -_start_new_thread = thread.start_new_thread -_allocate_lock = thread.allocate_lock -_get_ident = thread.get_ident -ThreadError = thread.error -del thread - - -# sys.exc_clear is used to work around the fact that except blocks -# don't fully clear the exception until 3.0. -warnings.filterwarnings('ignore', category=DeprecationWarning, - module='threading', message='sys.exc_clear') - -# Debug support (adapted from ihooks.py). -# All the major classes here derive from _Verbose. We force that to -# be a new-style class so that all the major classes here are new-style. -# This helps debugging (type(instance) is more revealing for instances -# of new-style classes). - -_VERBOSE = False - -if __debug__: - - class _Verbose(object): - - def __init__(self, verbose=None): - if verbose is None: - verbose = _VERBOSE - self.__verbose = verbose - - def _note(self, format, *args): - if self.__verbose: - format = format % args - format = "%s: %s\n" % ( - current_thread().name, format) - _sys.stderr.write(format) - -else: - # Disable this when using "python -O" - class _Verbose(object): - def __init__(self, verbose=None): - pass - def _note(self, *args): - pass - -# Support for profile and trace hooks - -_profile_hook = None -_trace_hook = None - -def setprofile(func): - global _profile_hook - _profile_hook = func - -def settrace(func): - global _trace_hook - _trace_hook = func - -# Synchronization classes - -Lock = _allocate_lock - -def RLock(*args, **kwargs): - return _RLock(*args, **kwargs) - -class _RLock(_Verbose): - - def __init__(self, verbose=None): - _Verbose.__init__(self, verbose) - self.__block = _allocate_lock() - self.__owner = None - self.__count = 0 - - def __repr__(self): - owner = self.__owner - return "<%s(%s, %d)>" % ( - self.__class__.__name__, - owner and owner.name, - self.__count) - - def acquire(self, blocking=1): - me = current_thread() - if self.__owner is me: - self.__count = self.__count + 1 - if __debug__: - self._note("%s.acquire(%s): recursive success", self, blocking) - return 1 - rc = self.__block.acquire(blocking) - if rc: - self.__owner = me - self.__count = 1 - if __debug__: - self._note("%s.acquire(%s): initial success", self, blocking) - else: - if __debug__: - self._note("%s.acquire(%s): failure", self, blocking) - return rc - - __enter__ = acquire - - def release(self): - if self.__owner is not current_thread(): - raise RuntimeError("cannot release un-aquired lock") - self.__count = count = self.__count - 1 - if not count: - self.__owner = None - self.__block.release() - if __debug__: - self._note("%s.release(): final release", self) - else: - if __debug__: - self._note("%s.release(): non-final release", self) - - def __exit__(self, t, v, tb): - self.release() - - # Internal methods used by condition variables - - def _acquire_restore(self, count_owner): - count, owner = count_owner - self.__block.acquire() - self.__count = count - self.__owner = owner - if __debug__: - self._note("%s._acquire_restore()", self) - - def _release_save(self): - if __debug__: - self._note("%s._release_save()", self) - count = self.__count - self.__count = 0 - owner = self.__owner - self.__owner = None - self.__block.release() - return (count, owner) - - def _is_owned(self): - return 
self.__owner is current_thread() - - -def Condition(*args, **kwargs): - return _Condition(*args, **kwargs) - -class _Condition(_Verbose): - - def __init__(self, lock=None, verbose=None): - _Verbose.__init__(self, verbose) - if lock is None: - lock = RLock() - self.__lock = lock - # Export the lock's acquire() and release() methods - self.acquire = lock.acquire - self.release = lock.release - # If the lock defines _release_save() and/or _acquire_restore(), - # these override the default implementations (which just call - # release() and acquire() on the lock). Ditto for _is_owned(). - try: - self._release_save = lock._release_save - except AttributeError: - pass - try: - self._acquire_restore = lock._acquire_restore - except AttributeError: - pass - try: - self._is_owned = lock._is_owned - except AttributeError: - pass - self.__waiters = [] - - def __enter__(self): - return self.__lock.__enter__() - - def __exit__(self, *args): - return self.__lock.__exit__(*args) - - def __repr__(self): - return "" % (self.__lock, len(self.__waiters)) - - def _release_save(self): - self.__lock.release() # No state to save - - def _acquire_restore(self, x): - self.__lock.acquire() # Ignore saved state - - def _is_owned(self): - # Return True if lock is owned by current_thread. - # This method is called only if __lock doesn't have _is_owned(). - if self.__lock.acquire(0): - self.__lock.release() - return False - else: - return True - - def wait(self, timeout=None): - if not self._is_owned(): - raise RuntimeError("cannot wait on un-aquired lock") - waiter = _allocate_lock() - waiter.acquire() - self.__waiters.append(waiter) - saved_state = self._release_save() - try: # restore state no matter what (e.g., KeyboardInterrupt) - if timeout is None: - waiter.acquire() - if __debug__: - self._note("%s.wait(): got it", self) - else: - # Balancing act: We can't afford a pure busy loop, so we - # have to sleep; but if we sleep the whole timeout time, - # we'll be unresponsive. The scheme here sleeps very - # little at first, longer as time goes on, but never longer - # than 20 times per second (or the timeout time remaining). 
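The comment above describes the adaptive polling behind timed waits: sleep intervals double from an initial millisecond up to a 50 ms ceiling, and never past the deadline. A standalone sketch of that scheme under assumed names (timed_acquire is hypothetical; the module applies this logic inside _Condition.wait()):

    import thread
    from time import time as _time, sleep as _sleep

    def timed_acquire(lock, timeout):
        # Poll with exponentially growing delays, capped at 50 ms and at
        # the time remaining before the deadline -- the scheme described above.
        endtime = _time() + timeout
        delay = 0.0005
        while not lock.acquire(0):
            remaining = endtime - _time()
            if remaining <= 0:
                return False
            delay = min(delay * 2, remaining, .05)
            _sleep(delay)
        return True

    lock = thread.allocate_lock()
    lock.acquire()                    # held, so polling must time out
    print timed_acquire(lock, 0.1)    # -> False after roughly 0.1 s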
- endtime = _time() + timeout - delay = 0.0005 # 500 us -> initial delay of 1 ms - while True: - gotit = waiter.acquire(0) - if gotit: - break - remaining = endtime - _time() - if remaining <= 0: - break - delay = min(delay * 2, remaining, .05) - _sleep(delay) - if not gotit: - if __debug__: - self._note("%s.wait(%s): timed out", self, timeout) - try: - self.__waiters.remove(waiter) - except ValueError: - pass - else: - if __debug__: - self._note("%s.wait(%s): got it", self, timeout) - finally: - self._acquire_restore(saved_state) - - def notify(self, n=1): - if not self._is_owned(): - raise RuntimeError("cannot notify on un-aquired lock") - __waiters = self.__waiters - waiters = __waiters[:n] - if not waiters: - if __debug__: - self._note("%s.notify(): no waiters", self) - return - self._note("%s.notify(): notifying %d waiter%s", self, n, - n!=1 and "s" or "") - for waiter in waiters: - waiter.release() - try: - __waiters.remove(waiter) - except ValueError: - pass - - def notifyAll(self): - self.notify(len(self.__waiters)) - - notify_all = notifyAll - - -def Semaphore(*args, **kwargs): - return _Semaphore(*args, **kwargs) - -class _Semaphore(_Verbose): - - # After Tim Peters' semaphore class, but not quite the same (no maximum) - - def __init__(self, value=1, verbose=None): - if value < 0: - raise ValueError("semaphore initial value must be >= 0") - _Verbose.__init__(self, verbose) - self.__cond = Condition(Lock()) - self.__value = value - - def acquire(self, blocking=1): - rc = False - self.__cond.acquire() - while self.__value == 0: - if not blocking: - break - if __debug__: - self._note("%s.acquire(%s): blocked waiting, value=%s", - self, blocking, self.__value) - self.__cond.wait() - else: - self.__value = self.__value - 1 - if __debug__: - self._note("%s.acquire: success, value=%s", - self, self.__value) - rc = True - self.__cond.release() - return rc - - __enter__ = acquire - - def release(self): - self.__cond.acquire() - self.__value = self.__value + 1 - if __debug__: - self._note("%s.release: success, value=%s", - self, self.__value) - self.__cond.notify() - self.__cond.release() - - def __exit__(self, t, v, tb): - self.release() - - -def BoundedSemaphore(*args, **kwargs): - return _BoundedSemaphore(*args, **kwargs) - -class _BoundedSemaphore(_Semaphore): - """Semaphore that checks that # releases is <= # acquires""" - def __init__(self, value=1, verbose=None): - _Semaphore.__init__(self, value, verbose) - self._initial_value = value - - def release(self): - if self._Semaphore__value >= self._initial_value: - raise ValueError, "Semaphore released too many times" - return _Semaphore.release(self) - - -def Event(*args, **kwargs): - return _Event(*args, **kwargs) - -class _Event(_Verbose): - - # After Tim Peters' event class (without is_posted()) - - def __init__(self, verbose=None): - _Verbose.__init__(self, verbose) - self.__cond = Condition(Lock()) - self.__flag = False - - def isSet(self): - return self.__flag - - is_set = isSet - - def set(self): - self.__cond.acquire() - try: - self.__flag = True - self.__cond.notify_all() - finally: - self.__cond.release() - - def clear(self): - self.__cond.acquire() - try: - self.__flag = False - finally: - self.__cond.release() - - def wait(self, timeout=None): - self.__cond.acquire() - try: - if not self.__flag: - self.__cond.wait(timeout) - finally: - self.__cond.release() - -# Helper to generate new thread names -_counter = 0 -def _newname(template="Thread-%d"): - global _counter - _counter = _counter + 1 - return template % _counter - -# 
Active thread administration -_active_limbo_lock = _allocate_lock() -_active = {} # maps thread id to Thread object -_limbo = {} - - -# Main class for threads - -class Thread(_Verbose): - - __initialized = False - # Need to store a reference to sys.exc_info for printing - # out exceptions when a thread tries to use a global var. during interp. - # shutdown and thus raises an exception about trying to perform some - # operation on/with a NoneType - __exc_info = _sys.exc_info - # Keep sys.exc_clear too to clear the exception just before - # allowing .join() to return. - __exc_clear = _sys.exc_clear - - def __init__(self, group=None, target=None, name=None, - args=(), kwargs=None, verbose=None): - assert group is None, "group argument must be None for now" - _Verbose.__init__(self, verbose) - if kwargs is None: - kwargs = {} - self.__target = target - self.__name = str(name or _newname()) - self.__args = args - self.__kwargs = kwargs - self.__daemonic = self._set_daemon() - self.__ident = None - self.__started = Event() - self.__stopped = False - self.__block = Condition(Lock()) - self.__initialized = True - # sys.stderr is not stored in the class like - # sys.exc_info since it can be changed between instances - self.__stderr = _sys.stderr - - def _set_daemon(self): - # Overridden in _MainThread and _DummyThread - return current_thread().daemon - - def __repr__(self): - assert self.__initialized, "Thread.__init__() was not called" - status = "initial" - if self.__started.is_set(): - status = "started" - if self.__stopped: - status = "stopped" - if self.__daemonic: - status += " daemon" - if self.__ident is not None: - status += " %s" % self.__ident - return "<%s(%s, %s)>" % (self.__class__.__name__, self.__name, status) - - def start(self): - if not self.__initialized: - raise RuntimeError("thread.__init__() not called") - if self.__started.is_set(): - raise RuntimeError("thread already started") - if __debug__: - self._note("%s.start(): starting thread", self) - _active_limbo_lock.acquire() - _limbo[self] = self - _active_limbo_lock.release() - _start_new_thread(self.__bootstrap, ()) - self.__started.wait() - - def run(self): - try: - if self.__target: - self.__target(*self.__args, **self.__kwargs) - finally: - # Avoid a refcycle if the thread is running a function with - # an argument that has a member that points to the thread. - del self.__target, self.__args, self.__kwargs - - def __bootstrap(self): - # Wrapper around the real bootstrap code that ignores - # exceptions during interpreter cleanup. Those typically - # happen when a daemon thread wakes up at an unfortunate - # moment, finds the world around it destroyed, and raises some - # random exception *** while trying to report the exception in - # __bootstrap_inner() below ***. Those random exceptions - # don't help anybody, and they confuse users, so we suppress - # them. We suppress them only when it appears that the world - # indeed has already been destroyed, so that exceptions in - # __bootstrap_inner() during normal business hours are properly - # reported. Also, we only suppress them for daemonic threads; - # if a non-daemonic encounters this, something else is wrong. 
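The Thread lifecycle implemented above: the constructor records the target, start() parks the object in _limbo and fires __bootstrap on a new OS thread, __bootstrap_inner() moves it to _active and calls run(), and join() blocks on the __block condition until __stop() marks it stopped. A minimal usage sketch (worker is a hypothetical function):

    import threading

    def worker(n):
        print "processing item %d" % n    # hypothetical work

    threads = [threading.Thread(target=worker, args=(i,)) for i in range(4)]
    for t in threads:
        t.daemon = False    # non-daemon: the interpreter waits for them at exit
        t.start()           # run() executes on a new OS thread
    for t in threads:
        t.join()            # returns once __stop() has run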
- try: - self.__bootstrap_inner() - except: - if self.__daemonic and _sys is None: - return - raise - - def _set_ident(self): - self.__ident = _get_ident() - - def __bootstrap_inner(self): - try: - self._set_ident() - self.__started.set() - _active_limbo_lock.acquire() - _active[self.__ident] = self - del _limbo[self] - _active_limbo_lock.release() - if __debug__: - self._note("%s.__bootstrap(): thread started", self) - - if _trace_hook: - self._note("%s.__bootstrap(): registering trace hook", self) - _sys.settrace(_trace_hook) - if _profile_hook: - self._note("%s.__bootstrap(): registering profile hook", self) - _sys.setprofile(_profile_hook) - - try: - self.run() - except SystemExit: - if __debug__: - self._note("%s.__bootstrap(): raised SystemExit", self) - except: - if __debug__: - self._note("%s.__bootstrap(): unhandled exception", self) - # If sys.stderr is no more (most likely from interpreter - # shutdown) use self.__stderr. Otherwise still use sys (as in - # _sys) in case sys.stderr was redefined since the creation of - # self. - if _sys: - _sys.stderr.write("Exception in thread %s:\n%s\n" % - (self.name, _format_exc())) - else: - # Do the best job possible w/o a huge amt. of code to - # approximate a traceback (code ideas from - # Lib/traceback.py) - exc_type, exc_value, exc_tb = self.__exc_info() - try: - print>>self.__stderr, ( - "Exception in thread " + self.name + - " (most likely raised during interpreter shutdown):") - print>>self.__stderr, ( - "Traceback (most recent call last):") - while exc_tb: - print>>self.__stderr, ( - ' File "%s", line %s, in %s' % - (exc_tb.tb_frame.f_code.co_filename, - exc_tb.tb_lineno, - exc_tb.tb_frame.f_code.co_name)) - exc_tb = exc_tb.tb_next - print>>self.__stderr, ("%s: %s" % (exc_type, exc_value)) - # Make sure that exc_tb gets deleted since it is a memory - # hog; deleting everything else is just for thoroughness - finally: - del exc_type, exc_value, exc_tb - else: - if __debug__: - self._note("%s.__bootstrap(): normal return", self) - finally: - # Prevent a race in - # test_threading.test_no_refcycle_through_target when - # the exception keeps the target alive past when we - # assert that it's dead. - self.__exc_clear() - finally: - with _active_limbo_lock: - self.__stop() - try: - # We don't call self.__delete() because it also - # grabs _active_limbo_lock. - del _active[_get_ident()] - except: - pass - - def __stop(self): - self.__block.acquire() - self.__stopped = True - self.__block.notify_all() - self.__block.release() - - def __delete(self): - "Remove current thread from the dict of currently running threads." - - # Notes about running with dummy_thread: - # - # Must take care to not raise an exception if dummy_thread is being - # used (and thus this module is being used as an instance of - # dummy_threading). dummy_thread.get_ident() always returns -1 since - # there is only one thread if dummy_thread is being used. Thus - # len(_active) is always <= 1 here, and any Thread instance created - # overwrites the (if any) thread currently registered in _active. - # - # An instance of _MainThread is always created by 'threading'. This - # gets overwritten the instant an instance of Thread is created; both - # threads return -1 from dummy_thread.get_ident() and thus have the - # same key in the dict. So when the _MainThread instance created by - # 'threading' tries to clean itself up when atexit calls this method - # it gets a KeyError if another Thread instance was created. 
- # - # This all means that KeyError from trying to delete something from - # _active if dummy_threading is being used is a red herring. But - # since it isn't if dummy_threading is *not* being used then don't - # hide the exception. - - try: - with _active_limbo_lock: - del _active[_get_ident()] - # There must not be any python code between the previous line - # and after the lock is released. Otherwise a tracing function - # could try to acquire the lock again in the same thread, (in - # current_thread()), and would block. - except KeyError: - if 'dummy_threading' not in _sys.modules: - raise - - def join(self, timeout=None): - if not self.__initialized: - raise RuntimeError("Thread.__init__() not called") - if not self.__started.is_set(): - raise RuntimeError("cannot join thread before it is started") - if self is current_thread(): - raise RuntimeError("cannot join current thread") - - if __debug__: - if not self.__stopped: - self._note("%s.join(): waiting until thread stops", self) - self.__block.acquire() - try: - if timeout is None: - while not self.__stopped: - self.__block.wait() - if __debug__: - self._note("%s.join(): thread stopped", self) - else: - deadline = _time() + timeout - while not self.__stopped: - delay = deadline - _time() - if delay <= 0: - if __debug__: - self._note("%s.join(): timed out", self) - break - self.__block.wait(delay) - else: - if __debug__: - self._note("%s.join(): thread stopped", self) - finally: - self.__block.release() - - @property - def name(self): - assert self.__initialized, "Thread.__init__() not called" - return self.__name - - @name.setter - def name(self, name): - assert self.__initialized, "Thread.__init__() not called" - self.__name = str(name) - - @property - def ident(self): - assert self.__initialized, "Thread.__init__() not called" - return self.__ident - - def isAlive(self): - assert self.__initialized, "Thread.__init__() not called" - return self.__started.is_set() and not self.__stopped - - is_alive = isAlive - - @property - def daemon(self): - assert self.__initialized, "Thread.__init__() not called" - return self.__daemonic - - @daemon.setter - def daemon(self, daemonic): - if not self.__initialized: - raise RuntimeError("Thread.__init__() not called") - if self.__started.is_set(): - raise RuntimeError("cannot set daemon status of active thread"); - self.__daemonic = daemonic - - def isDaemon(self): - return self.daemon - - def setDaemon(self, daemonic): - self.daemon = daemonic - - def getName(self): - return self.name - - def setName(self, name): - self.name = name - -# The timer class was contributed by Itamar Shtull-Trauring - -def Timer(*args, **kwargs): - return _Timer(*args, **kwargs) - -class _Timer(Thread): - """Call a function after a specified number of seconds: - - t = Timer(30.0, f, args=[], kwargs={}) - t.start() - t.cancel() # stop the timer's action if it's still waiting - """ - - def __init__(self, interval, function, args=[], kwargs={}): - Thread.__init__(self) - self.interval = interval - self.function = function - self.args = args - self.kwargs = kwargs - self.finished = Event() - - def cancel(self): - """Stop the timer if it hasn't finished yet""" - self.finished.set() - - def run(self): - self.finished.wait(self.interval) - if not self.finished.is_set(): - self.function(*self.args, **self.kwargs) - self.finished.set() - -# Special thread class to represent the main thread -# This is garbage collected through an exit handler - -class _MainThread(Thread): - - def __init__(self): - Thread.__init__(self, 
name="MainThread") - self._Thread__started.set() - self._set_ident() - _active_limbo_lock.acquire() - _active[_get_ident()] = self - _active_limbo_lock.release() - - def _set_daemon(self): - return False - - def _exitfunc(self): - self._Thread__stop() - t = _pickSomeNonDaemonThread() - if t: - if __debug__: - self._note("%s: waiting for other threads", self) - while t: - t.join() - t = _pickSomeNonDaemonThread() - if __debug__: - self._note("%s: exiting", self) - self._Thread__delete() - -def _pickSomeNonDaemonThread(): - for t in enumerate(): - if not t.daemon and t.is_alive(): - return t - return None - - -# Dummy thread class to represent threads not started here. -# These aren't garbage collected when they die, nor can they be waited for. -# If they invoke anything in threading.py that calls current_thread(), they -# leave an entry in the _active dict forever after. -# Their purpose is to return *something* from current_thread(). -# They are marked as daemon threads so we won't wait for them -# when we exit (conform previous semantics). - -class _DummyThread(Thread): - - def __init__(self): - Thread.__init__(self, name=_newname("Dummy-%d")) - - # Thread.__block consumes an OS-level locking primitive, which - # can never be used by a _DummyThread. Since a _DummyThread - # instance is immortal, that's bad, so release this resource. - del self._Thread__block - - self._Thread__started.set() - self._set_ident() - _active_limbo_lock.acquire() - _active[_get_ident()] = self - _active_limbo_lock.release() - - def _set_daemon(self): - return True - - def join(self, timeout=None): - assert False, "cannot join a dummy thread" - - -# Global API functions - -def currentThread(): - try: - return _active[_get_ident()] - except KeyError: - ##print "current_thread(): no current thread for", _get_ident() - return _DummyThread() - -current_thread = currentThread - -def activeCount(): - _active_limbo_lock.acquire() - count = len(_active) + len(_limbo) - _active_limbo_lock.release() - return count - -active_count = activeCount - -def enumerate(): - _active_limbo_lock.acquire() - active = _active.values() + _limbo.values() - _active_limbo_lock.release() - return active - -from thread import stack_size - -# Create the main thread object, -# and make it available for the interpreter -# (Py_Main) as threading._shutdown. - -_shutdown = _MainThread()._exitfunc - -# get thread-local implementation, either from the thread -# module, or from the python fallback - -try: - from thread import _local as local -except ImportError: - from _threading_local import local - - -def _after_fork(): - # This function is called by Python/ceval.c:PyEval_ReInitThreads which - # is called from PyOS_AfterFork. Here we cleanup threading module state - # that should not exist after a fork. - - # Reset _active_limbo_lock, in case we forked while the lock was held - # by another (non-forked) thread. http://bugs.python.org/issue874900 - global _active_limbo_lock - _active_limbo_lock = _allocate_lock() - - # fork() only copied the current thread; clear references to others. - new_active = {} - current = current_thread() - with _active_limbo_lock: - for thread in _active.itervalues(): - if thread is current: - # There is only one active thread. We reset the ident to - # its new value since it can have changed. - ident = _get_ident() - thread._Thread__ident = ident - new_active[ident] = thread - else: - # All the others are already stopped. 
- # We don't call _Thread__stop() because it tries to acquire - # thread._Thread__block which could also have been held while - # we forked. - thread._Thread__stopped = True - - _limbo.clear() - _active.clear() - _active.update(new_active) - assert len(_active) == 1 - - -# Self-test code - -def _test(): - - class BoundedQueue(_Verbose): - - def __init__(self, limit): - _Verbose.__init__(self) - self.mon = RLock() - self.rc = Condition(self.mon) - self.wc = Condition(self.mon) - self.limit = limit - self.queue = deque() - - def put(self, item): - self.mon.acquire() - while len(self.queue) >= self.limit: - self._note("put(%s): queue full", item) - self.wc.wait() - self.queue.append(item) - self._note("put(%s): appended, length now %d", - item, len(self.queue)) - self.rc.notify() - self.mon.release() - - def get(self): - self.mon.acquire() - while not self.queue: - self._note("get(): queue empty") - self.rc.wait() - item = self.queue.popleft() - self._note("get(): got %s, %d left", item, len(self.queue)) - self.wc.notify() - self.mon.release() - return item - - class ProducerThread(Thread): - - def __init__(self, queue, quota): - Thread.__init__(self, name="Producer") - self.queue = queue - self.quota = quota - - def run(self): - from random import random - counter = 0 - while counter < self.quota: - counter = counter + 1 - self.queue.put("%s.%d" % (self.name, counter)) - _sleep(random() * 0.00001) - - - class ConsumerThread(Thread): - - def __init__(self, queue, count): - Thread.__init__(self, name="Consumer") - self.queue = queue - self.count = count - - def run(self): - while self.count > 0: - item = self.queue.get() - print item - self.count = self.count - 1 - - NP = 3 - QL = 4 - NI = 5 - - Q = BoundedQueue(QL) - P = [] - for i in range(NP): - t = ProducerThread(Q, NI) - t.name = ("Producer-%d" % (i+1)) - P.append(t) - C = ConsumerThread(Q, NI*NP) - for t in P: - t.start() - _sleep(0.000001) - C.start() - for t in P: - t.join() - C.join() - -if __name__ == '__main__': - _test() diff --git a/icarus-miner/data/usr/lib/python2.6/threading.pyc b/icarus-miner/data/usr/lib/python2.6/threading.pyc deleted file mode 100644 index 1ebd77b..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/threading.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/traceback.py b/icarus-miner/data/usr/lib/python2.6/traceback.py deleted file mode 100644 index 3d877ee..0000000 --- a/icarus-miner/data/usr/lib/python2.6/traceback.py +++ /dev/null @@ -1,312 +0,0 @@ -"""Extract, format and print information about Python stack traces.""" - -import linecache -import sys -import types - -__all__ = ['extract_stack', 'extract_tb', 'format_exception', - 'format_exception_only', 'format_list', 'format_stack', - 'format_tb', 'print_exc', 'format_exc', 'print_exception', - 'print_last', 'print_stack', 'print_tb', 'tb_lineno'] - -def _print(file, str='', terminator='\n'): - file.write(str+terminator) - - -def print_list(extracted_list, file=None): - """Print the list of tuples as returned by extract_tb() or - extract_stack() as a formatted stack trace to the given file.""" - if file is None: - file = sys.stderr - for filename, lineno, name, line in extracted_list: - _print(file, - ' File "%s", line %d, in %s' % (filename,lineno,name)) - if line: - _print(file, ' %s' % line.strip()) - -def format_list(extracted_list): - """Format a list of traceback entry tuples for printing. - - Given a list of tuples as returned by extract_tb() or - extract_stack(), return a list of strings ready for printing. 
- Each string in the resulting list corresponds to the item with the - same index in the argument list. Each string ends in a newline; - the strings may contain internal newlines as well, for those items - whose source text line is not None. - """ - list = [] - for filename, lineno, name, line in extracted_list: - item = ' File "%s", line %d, in %s\n' % (filename,lineno,name) - if line: - item = item + ' %s\n' % line.strip() - list.append(item) - return list - - -def print_tb(tb, limit=None, file=None): - """Print up to 'limit' stack trace entries from the traceback 'tb'. - - If 'limit' is omitted or None, all entries are printed. If 'file' - is omitted or None, the output goes to sys.stderr; otherwise - 'file' should be an open file or file-like object with a write() - method. - """ - if file is None: - file = sys.stderr - if limit is None: - if hasattr(sys, 'tracebacklimit'): - limit = sys.tracebacklimit - n = 0 - while tb is not None and (limit is None or n < limit): - f = tb.tb_frame - lineno = tb.tb_lineno - co = f.f_code - filename = co.co_filename - name = co.co_name - _print(file, - ' File "%s", line %d, in %s' % (filename,lineno,name)) - linecache.checkcache(filename) - line = linecache.getline(filename, lineno, f.f_globals) - if line: _print(file, ' ' + line.strip()) - tb = tb.tb_next - n = n+1 - -def format_tb(tb, limit = None): - """A shorthand for 'format_list(extract_stack(f, limit)).""" - return format_list(extract_tb(tb, limit)) - -def extract_tb(tb, limit = None): - """Return list of up to limit pre-processed entries from traceback. - - This is useful for alternate formatting of stack traces. If - 'limit' is omitted or None, all entries are extracted. A - pre-processed stack trace entry is a quadruple (filename, line - number, function name, text) representing the information that is - usually printed for a stack trace. The text is a string with - leading and trailing whitespace stripped; if the source is not - available it is None. - """ - if limit is None: - if hasattr(sys, 'tracebacklimit'): - limit = sys.tracebacklimit - list = [] - n = 0 - while tb is not None and (limit is None or n < limit): - f = tb.tb_frame - lineno = tb.tb_lineno - co = f.f_code - filename = co.co_filename - name = co.co_name - linecache.checkcache(filename) - line = linecache.getline(filename, lineno, f.f_globals) - if line: line = line.strip() - else: line = None - list.append((filename, lineno, name, line)) - tb = tb.tb_next - n = n+1 - return list - - -def print_exception(etype, value, tb, limit=None, file=None): - """Print exception up to 'limit' stack trace entries from 'tb' to 'file'. - - This differs from print_tb() in the following ways: (1) if - traceback is not None, it prints a header "Traceback (most recent - call last):"; (2) it prints the exception type and value after the - stack trace; (3) if type is SyntaxError and value has the - appropriate format, it prints the line where the syntax error - occurred with a caret on the next line indicating the approximate - position of the error. - """ - if file is None: - file = sys.stderr - if tb: - _print(file, 'Traceback (most recent call last):') - print_tb(tb, limit, file) - lines = format_exception_only(etype, value) - for line in lines[:-1]: - _print(file, line, ' ') - _print(file, lines[-1], '') - -def format_exception(etype, value, tb, limit = None): - """Format a stack trace and the exception information. - - The arguments have the same meaning as the corresponding arguments - to print_exception(). 
The return value is a list of strings, each - ending in a newline and some containing internal newlines. When - these lines are concatenated and printed, exactly the same text is - printed as does print_exception(). - """ - if tb: - list = ['Traceback (most recent call last):\n'] - list = list + format_tb(tb, limit) - else: - list = [] - list = list + format_exception_only(etype, value) - return list - -def format_exception_only(etype, value): - """Format the exception part of a traceback. - - The arguments are the exception type and value such as given by - sys.last_type and sys.last_value. The return value is a list of - strings, each ending in a newline. - - Normally, the list contains a single string; however, for - SyntaxError exceptions, it contains several lines that (when - printed) display detailed information about where the syntax - error occurred. - - The message indicating which exception occurred is always the last - string in the list. - - """ - - # An instance should not have a meaningful value parameter, but - # sometimes does, particularly for string exceptions, such as - # >>> raise string1, string2 # deprecated - # - # Clear these out first because issubtype(string1, SyntaxError) - # would throw another exception and mask the original problem. - if (isinstance(etype, BaseException) or - isinstance(etype, types.InstanceType) or - etype is None or type(etype) is str): - return [_format_final_exc_line(etype, value)] - - stype = etype.__name__ - - if not issubclass(etype, SyntaxError): - return [_format_final_exc_line(stype, value)] - - # It was a syntax error; show exactly where the problem was found. - lines = [] - try: - msg, (filename, lineno, offset, badline) = value.args - except Exception: - pass - else: - filename = filename or "" - lines.append(' File "%s", line %d\n' % (filename, lineno)) - if badline is not None: - lines.append(' %s\n' % badline.strip()) - if offset is not None: - caretspace = badline[:offset].lstrip() - # non-space whitespace (likes tabs) must be kept for alignment - caretspace = ((c.isspace() and c or ' ') for c in caretspace) - # only three spaces to account for offset1 == pos 0 - lines.append(' %s^\n' % ''.join(caretspace)) - value = msg - - lines.append(_format_final_exc_line(stype, value)) - return lines - -def _format_final_exc_line(etype, value): - """Return a list of a single line -- normal case for format_exception_only""" - valuestr = _some_str(value) - if value is None or not valuestr: - line = "%s\n" % etype - else: - line = "%s: %s\n" % (etype, valuestr) - return line - -def _some_str(value): - try: - return str(value) - except: - return '' % type(value).__name__ - - -def print_exc(limit=None, file=None): - """Shorthand for 'print_exception(sys.exc_type, sys.exc_value, sys.exc_traceback, limit, file)'. 
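The format_* helpers above mirror the print_* ones: format_exception() returns exactly the text print_exception() would emit, as a list of newline-terminated strings, and format_exception_only() yields just the trailing "Type: value" line(s). A short sketch:

    import sys
    import traceback

    try:
        1 / 0
    except ZeroDivisionError:
        etype, value, tb = sys.exc_info()
        # The same text print_exception() would write, but as a string:
        sys.stderr.write(''.join(traceback.format_exception(etype, value, tb)))
        # Just the final "ZeroDivisionError: ..." line:
        sys.stderr.write(traceback.format_exception_only(etype, value)[-1])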
- (In fact, it uses sys.exc_info() to retrieve the same information - in a thread-safe way.)""" - if file is None: - file = sys.stderr - try: - etype, value, tb = sys.exc_info() - print_exception(etype, value, tb, limit, file) - finally: - etype = value = tb = None - - -def format_exc(limit=None): - """Like print_exc() but return a string.""" - try: - etype, value, tb = sys.exc_info() - return ''.join(format_exception(etype, value, tb, limit)) - finally: - etype = value = tb = None - - -def print_last(limit=None, file=None): - """This is a shorthand for 'print_exception(sys.last_type, - sys.last_value, sys.last_traceback, limit, file)'.""" - if file is None: - file = sys.stderr - print_exception(sys.last_type, sys.last_value, sys.last_traceback, - limit, file) - - -def print_stack(f=None, limit=None, file=None): - """Print a stack trace from its invocation point. - - The optional 'f' argument can be used to specify an alternate - stack frame at which to start. The optional 'limit' and 'file' - arguments have the same meaning as for print_exception(). - """ - if f is None: - try: - raise ZeroDivisionError - except ZeroDivisionError: - f = sys.exc_info()[2].tb_frame.f_back - print_list(extract_stack(f, limit), file) - -def format_stack(f=None, limit=None): - """Shorthand for 'format_list(extract_stack(f, limit))'.""" - if f is None: - try: - raise ZeroDivisionError - except ZeroDivisionError: - f = sys.exc_info()[2].tb_frame.f_back - return format_list(extract_stack(f, limit)) - -def extract_stack(f=None, limit = None): - """Extract the raw traceback from the current stack frame. - - The return value has the same format as for extract_tb(). The - optional 'f' and 'limit' arguments have the same meaning as for - print_stack(). Each item in the list is a quadruple (filename, - line number, function name, text), and the entries are in order - from oldest to newest stack frame. - """ - if f is None: - try: - raise ZeroDivisionError - except ZeroDivisionError: - f = sys.exc_info()[2].tb_frame.f_back - if limit is None: - if hasattr(sys, 'tracebacklimit'): - limit = sys.tracebacklimit - list = [] - n = 0 - while f is not None and (limit is None or n < limit): - lineno = f.f_lineno - co = f.f_code - filename = co.co_filename - name = co.co_name - linecache.checkcache(filename) - line = linecache.getline(filename, lineno, f.f_globals) - if line: line = line.strip() - else: line = None - list.append((filename, lineno, name, line)) - f = f.f_back - n = n+1 - list.reverse() - return list - -def tb_lineno(tb): - """Calculate correct line number of traceback given in tb. - - Obsolete in 2.3. - """ - return tb.tb_lineno diff --git a/icarus-miner/data/usr/lib/python2.6/traceback.pyc b/icarus-miner/data/usr/lib/python2.6/traceback.pyc deleted file mode 100644 index 31792aa..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/traceback.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/types.py b/icarus-miner/data/usr/lib/python2.6/types.py deleted file mode 100644 index ea316fa..0000000 --- a/icarus-miner/data/usr/lib/python2.6/types.py +++ /dev/null @@ -1,93 +0,0 @@ -"""Define names for all type symbols known in the standard interpreter. - -Types that are part of optional modules (e.g. array) are not listed. -""" -import sys - -# Iterators in Python aren't a matter of type but of protocol. A large -# and changing number of builtin types implement *some* flavor of -# iterator. Don't check the type! Use hasattr to check for both -# "__iter__" and "next" attributes instead. 
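As the comment above insists, iteration in Python 2 is a protocol rather than a type: testing for __iter__ and next covers every flavor of iterator, including ones from optional modules this file never names. A quick sketch of the suggested hasattr check:

    def is_iterator(obj):
        # An iterator yields items via next() and returns itself from
        # __iter__(); hasattr covers both without any type comparison.
        return hasattr(obj, '__iter__') and hasattr(obj, 'next')

    print is_iterator(iter([1, 2, 3]))    # True
    print is_iterator([1, 2, 3])          # False: iterable, not an iterator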
- -NoneType = type(None) -TypeType = type -ObjectType = object - -IntType = int -LongType = long -FloatType = float -BooleanType = bool -try: - ComplexType = complex -except NameError: - pass - -StringType = str - -# StringTypes is already outdated. Instead of writing "type(x) in -# types.StringTypes", you should use "isinstance(x, basestring)". But -# we keep around for compatibility with Python 2.2. -try: - UnicodeType = unicode - StringTypes = (StringType, UnicodeType) -except NameError: - StringTypes = (StringType,) - -BufferType = buffer - -TupleType = tuple -ListType = list -DictType = DictionaryType = dict - -def _f(): pass -FunctionType = type(_f) -LambdaType = type(lambda: None) # Same as FunctionType -try: - CodeType = type(_f.func_code) -except RuntimeError: - # Execution in restricted environment - pass - -def _g(): - yield 1 -GeneratorType = type(_g()) - -class _C: - def _m(self): pass -ClassType = type(_C) -UnboundMethodType = type(_C._m) # Same as MethodType -_x = _C() -InstanceType = type(_x) -MethodType = type(_x._m) - -BuiltinFunctionType = type(len) -BuiltinMethodType = type([].append) # Same as BuiltinFunctionType - -ModuleType = type(sys) -FileType = file -XRangeType = xrange - -try: - raise TypeError -except TypeError: - try: - tb = sys.exc_info()[2] - TracebackType = type(tb) - FrameType = type(tb.tb_frame) - except AttributeError: - # In the restricted environment, exc_info returns (None, None, - # None) Then, tb.tb_frame gives an attribute error - pass - tb = None; del tb - -SliceType = slice -EllipsisType = type(Ellipsis) - -DictProxyType = type(TypeType.__dict__) -NotImplementedType = type(NotImplemented) - -# For Jython, the following two types are identical -GetSetDescriptorType = type(FunctionType.func_code) -MemberDescriptorType = type(FunctionType.func_globals) - -del sys, _f, _g, _C, _x # Not for export diff --git a/icarus-miner/data/usr/lib/python2.6/types.pyc b/icarus-miner/data/usr/lib/python2.6/types.pyc deleted file mode 100644 index adb51e6..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/types.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/urllib.py b/icarus-miner/data/usr/lib/python2.6/urllib.py deleted file mode 100644 index 1ae08a0..0000000 --- a/icarus-miner/data/usr/lib/python2.6/urllib.py +++ /dev/null @@ -1,1625 +0,0 @@ -"""Open an arbitrary URL. - -See the following document for more info on URLs: -"Names and Addresses, URIs, URLs, URNs, URCs", at -http://www.w3.org/pub/WWW/Addressing/Overview.html - -See also the HTTP spec (from which the error codes are derived): -"HTTP - Hypertext Transfer Protocol", at -http://www.w3.org/pub/WWW/Protocols/ - -Related standards and specs: -- RFC1808: the "relative URL" spec. (authoritative status) -- RFC1738 - the "URL standard". (authoritative status) -- RFC1630 - the "URI spec". (informational status) - -The object returned by URLopener().open(file) will differ per -protocol. All you know is that is has methods read(), readline(), -readlines(), fileno(), close() and info(). The read*(), fileno() -and close() methods work like those of open files. -The info() method returns a mimetools.Message object which can be -used to query various info about the object, if available. -(mimetools.Message objects are queried with the getheader() method.) 
-""" - -import string -import socket -import os -import time -import sys -from urlparse import urljoin as basejoin -import warnings - -__all__ = ["urlopen", "URLopener", "FancyURLopener", "urlretrieve", - "urlcleanup", "quote", "quote_plus", "unquote", "unquote_plus", - "urlencode", "url2pathname", "pathname2url", "splittag", - "localhost", "thishost", "ftperrors", "basejoin", "unwrap", - "splittype", "splithost", "splituser", "splitpasswd", "splitport", - "splitnport", "splitquery", "splitattr", "splitvalue", - "getproxies"] - -__version__ = '1.17' # XXX This version is not always updated :-( - -MAXFTPCACHE = 10 # Trim the ftp cache beyond this size - -# Helper for non-unix systems -if os.name == 'mac': - from macurl2path import url2pathname, pathname2url -elif os.name == 'nt': - from nturl2path import url2pathname, pathname2url -elif os.name == 'riscos': - from rourl2path import url2pathname, pathname2url -else: - def url2pathname(pathname): - """OS-specific conversion from a relative URL of the 'file' scheme - to a file system path; not recommended for general use.""" - return unquote(pathname) - - def pathname2url(pathname): - """OS-specific conversion from a file system path to a relative URL - of the 'file' scheme; not recommended for general use.""" - return quote(pathname) - -# This really consists of two pieces: -# (1) a class which handles opening of all sorts of URLs -# (plus assorted utilities etc.) -# (2) a set of functions for parsing URLs -# XXX Should these be separated out into different modules? - - -# Shortcut for basic usage -_urlopener = None -def urlopen(url, data=None, proxies=None): - """Create a file-like object for the specified URL to read from.""" - from warnings import warnpy3k - warnings.warnpy3k("urllib.urlopen() has been removed in Python 3.0 in " - "favor of urllib2.urlopen()", stacklevel=2) - - global _urlopener - if proxies is not None: - opener = FancyURLopener(proxies=proxies) - elif not _urlopener: - opener = FancyURLopener() - _urlopener = opener - else: - opener = _urlopener - if data is None: - return opener.open(url) - else: - return opener.open(url, data) -def urlretrieve(url, filename=None, reporthook=None, data=None): - global _urlopener - if not _urlopener: - _urlopener = FancyURLopener() - return _urlopener.retrieve(url, filename, reporthook, data) -def urlcleanup(): - if _urlopener: - _urlopener.cleanup() - -# check for SSL -try: - import ssl -except: - _have_ssl = False -else: - _have_ssl = True - -# exception raised when downloaded size does not match content-length -class ContentTooShortError(IOError): - def __init__(self, message, content): - IOError.__init__(self, message) - self.content = content - -ftpcache = {} -class URLopener: - """Class to open URLs. - This is a class rather than just a subroutine because we may need - more than one set of global protocol-specific options. 
- Note -- this is a base class for those who don't want the - automatic handling of errors type 302 (relocated) and 401 - (authorization needed).""" - - __tempfiles = None - - version = "Python-urllib/%s" % __version__ - - # Constructor - def __init__(self, proxies=None, **x509): - if proxies is None: - proxies = getproxies() - assert hasattr(proxies, 'has_key'), "proxies must be a mapping" - self.proxies = proxies - self.key_file = x509.get('key_file') - self.cert_file = x509.get('cert_file') - self.addheaders = [('User-Agent', self.version)] - self.__tempfiles = [] - self.__unlink = os.unlink # See cleanup() - self.tempcache = None - # Undocumented feature: if you assign {} to tempcache, - # it is used to cache files retrieved with - # self.retrieve(). This is not enabled by default - # since it does not work for changing documents (and I - # haven't got the logic to check expiration headers - # yet). - self.ftpcache = ftpcache - # Undocumented feature: you can use a different - # ftp cache by assigning to the .ftpcache member; - # in case you want logically independent URL openers - # XXX This is not threadsafe. Bah. - - def __del__(self): - self.close() - - def close(self): - self.cleanup() - - def cleanup(self): - # This code sometimes runs when the rest of this module - # has already been deleted, so it can't use any globals - # or import anything. - if self.__tempfiles: - for file in self.__tempfiles: - try: - self.__unlink(file) - except OSError: - pass - del self.__tempfiles[:] - if self.tempcache: - self.tempcache.clear() - - def addheader(self, *args): - """Add a header to be used by the HTTP interface only - e.g. u.addheader('Accept', 'sound/basic')""" - self.addheaders.append(args) - - # External interface - def open(self, fullurl, data=None): - """Use URLopener().open(file) instead of open(file, 'r').""" - fullurl = unwrap(toBytes(fullurl)) - # percent encode url. 
fixing lame server errors like space within url - # parts - fullurl = quote(fullurl, safe="%/:=&?~#+!$,;'@()*[]") - if self.tempcache and fullurl in self.tempcache: - filename, headers = self.tempcache[fullurl] - fp = open(filename, 'rb') - return addinfourl(fp, headers, fullurl) - urltype, url = splittype(fullurl) - if not urltype: - urltype = 'file' - if urltype in self.proxies: - proxy = self.proxies[urltype] - urltype, proxyhost = splittype(proxy) - host, selector = splithost(proxyhost) - url = (host, fullurl) # Signal special case to open_*() - else: - proxy = None - name = 'open_' + urltype - self.type = urltype - name = name.replace('-', '_') - if not hasattr(self, name): - if proxy: - return self.open_unknown_proxy(proxy, fullurl, data) - else: - return self.open_unknown(fullurl, data) - try: - if data is None: - return getattr(self, name)(url) - else: - return getattr(self, name)(url, data) - except socket.error, msg: - raise IOError, ('socket error', msg), sys.exc_info()[2] - - def open_unknown(self, fullurl, data=None): - """Overridable interface to open unknown URL type.""" - type, url = splittype(fullurl) - raise IOError, ('url error', 'unknown url type', type) - - def open_unknown_proxy(self, proxy, fullurl, data=None): - """Overridable interface to open unknown URL type.""" - type, url = splittype(fullurl) - raise IOError, ('url error', 'invalid proxy for %s' % type, proxy) - - # External interface - def retrieve(self, url, filename=None, reporthook=None, data=None): - """retrieve(url) returns (filename, headers) for a local object - or (tempfilename, headers) for a remote object.""" - url = unwrap(toBytes(url)) - if self.tempcache and url in self.tempcache: - return self.tempcache[url] - type, url1 = splittype(url) - if filename is None and (not type or type == 'file'): - try: - fp = self.open_local_file(url1) - hdrs = fp.info() - del fp - return url2pathname(splithost(url1)[1]), hdrs - except IOError, msg: - pass - fp = self.open(url, data) - try: - headers = fp.info() - if filename: - tfp = open(filename, 'wb') - else: - import tempfile - garbage, path = splittype(url) - garbage, path = splithost(path or "") - path, garbage = splitquery(path or "") - path, garbage = splitattr(path or "") - suffix = os.path.splitext(path)[1] - (fd, filename) = tempfile.mkstemp(suffix) - self.__tempfiles.append(filename) - tfp = os.fdopen(fd, 'wb') - try: - result = filename, headers - if self.tempcache is not None: - self.tempcache[url] = result - bs = 1024*8 - size = -1 - read = 0 - blocknum = 0 - if reporthook: - if "content-length" in headers: - size = int(headers["Content-Length"]) - reporthook(blocknum, bs, size) - while 1: - block = fp.read(bs) - if block == "": - break - read += len(block) - tfp.write(block) - blocknum += 1 - if reporthook: - reporthook(blocknum, bs, size) - finally: - tfp.close() - finally: - fp.close() - del fp - del tfp - - # raise exception if actual size does not match content-length header - if size >= 0 and read < size: - raise ContentTooShortError("retrieval incomplete: got only %i out " - "of %i bytes" % (read, size), result) - - return result - - # Each method named open_ knows how to open that type of URL - - def open_http(self, url, data=None): - """Use HTTP protocol.""" - import httplib - user_passwd = None - proxy_passwd= None - if isinstance(url, str): - host, selector = splithost(url) - if host: - user_passwd, host = splituser(host) - host = unquote(host) - realhost = host - else: - host, selector = url - # check whether the proxy contains 
authorization information - proxy_passwd, host = splituser(host) - # now we proceed with the url we want to obtain - urltype, rest = splittype(selector) - url = rest - user_passwd = None - if urltype.lower() != 'http': - realhost = None - else: - realhost, rest = splithost(rest) - if realhost: - user_passwd, realhost = splituser(realhost) - if user_passwd: - selector = "%s://%s%s" % (urltype, realhost, rest) - if proxy_bypass(realhost): - host = realhost - - #print "proxy via http:", host, selector - if not host: raise IOError, ('http error', 'no host given') - - if proxy_passwd: - import base64 - proxy_auth = base64.b64encode(proxy_passwd).strip() - else: - proxy_auth = None - - if user_passwd: - import base64 - auth = base64.b64encode(user_passwd).strip() - else: - auth = None - h = httplib.HTTP(host) - if data is not None: - h.putrequest('POST', selector) - h.putheader('Content-Type', 'application/x-www-form-urlencoded') - h.putheader('Content-Length', '%d' % len(data)) - else: - h.putrequest('GET', selector) - if proxy_auth: h.putheader('Proxy-Authorization', 'Basic %s' % proxy_auth) - if auth: h.putheader('Authorization', 'Basic %s' % auth) - if realhost: h.putheader('Host', realhost) - for args in self.addheaders: h.putheader(*args) - h.endheaders() - if data is not None: - h.send(data) - errcode, errmsg, headers = h.getreply() - fp = h.getfile() - if errcode == -1: - if fp: fp.close() - # something went wrong with the HTTP status line - raise IOError, ('http protocol error', 0, - 'got a bad status line', None) - # According to RFC 2616, "2xx" code indicates that the client's - # request was successfully received, understood, and accepted. - if (200 <= errcode < 300): - return addinfourl(fp, headers, "http:" + url, errcode) - else: - if data is None: - return self.http_error(url, fp, errcode, errmsg, headers) - else: - return self.http_error(url, fp, errcode, errmsg, headers, data) - - def http_error(self, url, fp, errcode, errmsg, headers, data=None): - """Handle http errors. 
- Derived class can override this, or provide specific handlers - named http_error_DDD where DDD is the 3-digit error code.""" - # First check if there's a specific handler for this error - name = 'http_error_%d' % errcode - if hasattr(self, name): - method = getattr(self, name) - if data is None: - result = method(url, fp, errcode, errmsg, headers) - else: - result = method(url, fp, errcode, errmsg, headers, data) - if result: return result - return self.http_error_default(url, fp, errcode, errmsg, headers) - - def http_error_default(self, url, fp, errcode, errmsg, headers): - """Default error handler: close the connection and raise IOError.""" - void = fp.read() - fp.close() - raise IOError, ('http error', errcode, errmsg, headers) - - if _have_ssl: - def open_https(self, url, data=None): - """Use HTTPS protocol.""" - - import httplib - user_passwd = None - proxy_passwd = None - if isinstance(url, str): - host, selector = splithost(url) - if host: - user_passwd, host = splituser(host) - host = unquote(host) - realhost = host - else: - host, selector = url - # here, we determine, whether the proxy contains authorization information - proxy_passwd, host = splituser(host) - urltype, rest = splittype(selector) - url = rest - user_passwd = None - if urltype.lower() != 'https': - realhost = None - else: - realhost, rest = splithost(rest) - if realhost: - user_passwd, realhost = splituser(realhost) - if user_passwd: - selector = "%s://%s%s" % (urltype, realhost, rest) - #print "proxy via https:", host, selector - if not host: raise IOError, ('https error', 'no host given') - if proxy_passwd: - import base64 - proxy_auth = base64.b64encode(proxy_passwd).strip() - else: - proxy_auth = None - if user_passwd: - import base64 - auth = base64.b64encode(user_passwd).strip() - else: - auth = None - h = httplib.HTTPS(host, 0, - key_file=self.key_file, - cert_file=self.cert_file) - if data is not None: - h.putrequest('POST', selector) - h.putheader('Content-Type', - 'application/x-www-form-urlencoded') - h.putheader('Content-Length', '%d' % len(data)) - else: - h.putrequest('GET', selector) - if proxy_auth: h.putheader('Proxy-Authorization', 'Basic %s' % proxy_auth) - if auth: h.putheader('Authorization', 'Basic %s' % auth) - if realhost: h.putheader('Host', realhost) - for args in self.addheaders: h.putheader(*args) - h.endheaders() - if data is not None: - h.send(data) - errcode, errmsg, headers = h.getreply() - fp = h.getfile() - if errcode == -1: - if fp: fp.close() - # something went wrong with the HTTP status line - raise IOError, ('http protocol error', 0, - 'got a bad status line', None) - # According to RFC 2616, "2xx" code indicates that the client's - # request was successfully received, understood, and accepted. 
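http_error() above dispatches by name: a subclass only needs to define http_error_DDD for the status codes it cares about, and anything unhandled falls through to http_error_default(). A minimal sketch of that hook (StrictOpener is hypothetical; FancyURLopener's default would otherwise hand back the error page silently):

    import urllib

    class StrictOpener(urllib.FancyURLopener):
        # Looked up by http_error() under the name 'http_error_404'.
        def http_error_404(self, url, fp, errcode, errmsg, headers, data=None):
            fp.close()
            raise IOError('http error', errcode, 'not found: ' + url)

    opener = StrictOpener()
    page = opener.open('http://example.com/')    # placeholder URL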
- if (200 <= errcode < 300): - return addinfourl(fp, headers, "https:" + url, errcode) - else: - if data is None: - return self.http_error(url, fp, errcode, errmsg, headers) - else: - return self.http_error(url, fp, errcode, errmsg, headers, - data) - - def open_file(self, url): - """Use local file or FTP depending on form of URL.""" - if not isinstance(url, str): - raise IOError, ('file error', 'proxy support for file protocol currently not implemented') - if url[:2] == '//' and url[2:3] != '/' and url[2:12].lower() != 'localhost/': - return self.open_ftp(url) - else: - return self.open_local_file(url) - - def open_local_file(self, url): - """Use local file.""" - import mimetypes, mimetools, email.utils - try: - from cStringIO import StringIO - except ImportError: - from StringIO import StringIO - host, file = splithost(url) - localname = url2pathname(file) - try: - stats = os.stat(localname) - except OSError, e: - raise IOError(e.errno, e.strerror, e.filename) - size = stats.st_size - modified = email.utils.formatdate(stats.st_mtime, usegmt=True) - mtype = mimetypes.guess_type(url)[0] - headers = mimetools.Message(StringIO( - 'Content-Type: %s\nContent-Length: %d\nLast-modified: %s\n' % - (mtype or 'text/plain', size, modified))) - if not host: - urlfile = file - if file[:1] == '/': - urlfile = 'file://' + file - return addinfourl(open(localname, 'rb'), - headers, urlfile) - host, port = splitport(host) - if not port \ - and socket.gethostbyname(host) in (localhost(), thishost()): - urlfile = file - if file[:1] == '/': - urlfile = 'file://' + file - return addinfourl(open(localname, 'rb'), - headers, urlfile) - raise IOError, ('local file error', 'not on local host') - - def open_ftp(self, url): - """Use FTP protocol.""" - if not isinstance(url, str): - raise IOError, ('ftp error', 'proxy support for ftp protocol currently not implemented') - import mimetypes, mimetools - try: - from cStringIO import StringIO - except ImportError: - from StringIO import StringIO - host, path = splithost(url) - if not host: raise IOError, ('ftp error', 'no host given') - host, port = splitport(host) - user, host = splituser(host) - if user: user, passwd = splitpasswd(user) - else: passwd = None - host = unquote(host) - user = unquote(user or '') - passwd = unquote(passwd or '') - host = socket.gethostbyname(host) - if not port: - import ftplib - port = ftplib.FTP_PORT - else: - port = int(port) - path, attrs = splitattr(path) - path = unquote(path) - dirs = path.split('/') - dirs, file = dirs[:-1], dirs[-1] - if dirs and not dirs[0]: dirs = dirs[1:] - if dirs and not dirs[0]: dirs[0] = '/' - key = user, host, port, '/'.join(dirs) - # XXX thread unsafe! 
- if len(self.ftpcache) > MAXFTPCACHE: - # Prune the cache, rather arbitrarily - for k in self.ftpcache.keys(): - if k != key: - v = self.ftpcache[k] - del self.ftpcache[k] - v.close() - try: - if not key in self.ftpcache: - self.ftpcache[key] = \ - ftpwrapper(user, passwd, host, port, dirs) - if not file: type = 'D' - else: type = 'I' - for attr in attrs: - attr, value = splitvalue(attr) - if attr.lower() == 'type' and \ - value in ('a', 'A', 'i', 'I', 'd', 'D'): - type = value.upper() - (fp, retrlen) = self.ftpcache[key].retrfile(file, type) - mtype = mimetypes.guess_type("ftp:" + url)[0] - headers = "" - if mtype: - headers += "Content-Type: %s\n" % mtype - if retrlen is not None and retrlen >= 0: - headers += "Content-Length: %d\n" % retrlen - headers = mimetools.Message(StringIO(headers)) - return addinfourl(fp, headers, "ftp:" + url) - except ftperrors(), msg: - raise IOError, ('ftp error', msg), sys.exc_info()[2] - - def open_data(self, url, data=None): - """Use "data" URL.""" - if not isinstance(url, str): - raise IOError, ('data error', 'proxy support for data protocol currently not implemented') - # ignore POSTed data - # - # syntax of data URLs: - # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data - # mediatype := [ type "/" subtype ] *( ";" parameter ) - # data := *urlchar - # parameter := attribute "=" value - import mimetools - try: - from cStringIO import StringIO - except ImportError: - from StringIO import StringIO - try: - [type, data] = url.split(',', 1) - except ValueError: - raise IOError, ('data error', 'bad data URL') - if not type: - type = 'text/plain;charset=US-ASCII' - semi = type.rfind(';') - if semi >= 0 and '=' not in type[semi:]: - encoding = type[semi+1:] - type = type[:semi] - else: - encoding = '' - msg = [] - msg.append('Date: %s'%time.strftime('%a, %d %b %Y %T GMT', - time.gmtime(time.time()))) - msg.append('Content-type: %s' % type) - if encoding == 'base64': - import base64 - data = base64.decodestring(data) - else: - data = unquote(data) - msg.append('Content-Length: %d' % len(data)) - msg.append('') - msg.append(data) - msg = '\n'.join(msg) - f = StringIO(msg) - headers = mimetools.Message(f, 0) - #f.fileno = None # needed for addinfourl - return addinfourl(f, headers, url) - - -class FancyURLopener(URLopener): - """Derived class with handlers for errors we can handle (perhaps).""" - - def __init__(self, *args, **kwargs): - URLopener.__init__(self, *args, **kwargs) - self.auth_cache = {} - self.tries = 0 - self.maxtries = 10 - - def http_error_default(self, url, fp, errcode, errmsg, headers): - """Default error handling -- don't raise an exception.""" - return addinfourl(fp, headers, "http:" + url, errcode) - - def http_error_302(self, url, fp, errcode, errmsg, headers, data=None): - """Error 302 -- relocated (temporarily).""" - self.tries += 1 - if self.maxtries and self.tries >= self.maxtries: - if hasattr(self, "http_error_500"): - meth = self.http_error_500 - else: - meth = self.http_error_default - self.tries = 0 - return meth(url, fp, 500, - "Internal Server Error: Redirect Recursion", headers) - result = self.redirect_internal(url, fp, errcode, errmsg, headers, - data) - self.tries = 0 - return result - - def redirect_internal(self, url, fp, errcode, errmsg, headers, data): - if 'location' in headers: - newurl = headers['location'] - elif 'uri' in headers: - newurl = headers['uri'] - else: - return - void = fp.read() - fp.close() - # In case the server sent a relative URL, join with original: - newurl = basejoin(self.type + ":" + url, 
newurl) - return self.open(newurl) - - def http_error_301(self, url, fp, errcode, errmsg, headers, data=None): - """Error 301 -- also relocated (permanently).""" - return self.http_error_302(url, fp, errcode, errmsg, headers, data) - - def http_error_303(self, url, fp, errcode, errmsg, headers, data=None): - """Error 303 -- also relocated (essentially identical to 302).""" - return self.http_error_302(url, fp, errcode, errmsg, headers, data) - - def http_error_307(self, url, fp, errcode, errmsg, headers, data=None): - """Error 307 -- relocated, but turn POST into error.""" - if data is None: - return self.http_error_302(url, fp, errcode, errmsg, headers, data) - else: - return self.http_error_default(url, fp, errcode, errmsg, headers) - - def http_error_401(self, url, fp, errcode, errmsg, headers, data=None): - """Error 401 -- authentication required. - This function supports Basic authentication only.""" - if not 'www-authenticate' in headers: - URLopener.http_error_default(self, url, fp, - errcode, errmsg, headers) - stuff = headers['www-authenticate'] - import re - match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff) - if not match: - URLopener.http_error_default(self, url, fp, - errcode, errmsg, headers) - scheme, realm = match.groups() - if scheme.lower() != 'basic': - URLopener.http_error_default(self, url, fp, - errcode, errmsg, headers) - name = 'retry_' + self.type + '_basic_auth' - if data is None: - return getattr(self,name)(url, realm) - else: - return getattr(self,name)(url, realm, data) - - def http_error_407(self, url, fp, errcode, errmsg, headers, data=None): - """Error 407 -- proxy authentication required. - This function supports Basic authentication only.""" - if not 'proxy-authenticate' in headers: - URLopener.http_error_default(self, url, fp, - errcode, errmsg, headers) - stuff = headers['proxy-authenticate'] - import re - match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff) - if not match: - URLopener.http_error_default(self, url, fp, - errcode, errmsg, headers) - scheme, realm = match.groups() - if scheme.lower() != 'basic': - URLopener.http_error_default(self, url, fp, - errcode, errmsg, headers) - name = 'retry_proxy_' + self.type + '_basic_auth' - if data is None: - return getattr(self,name)(url, realm) - else: - return getattr(self,name)(url, realm, data) - - def retry_proxy_http_basic_auth(self, url, realm, data=None): - host, selector = splithost(url) - newurl = 'http://' + host + selector - proxy = self.proxies['http'] - urltype, proxyhost = splittype(proxy) - proxyhost, proxyselector = splithost(proxyhost) - i = proxyhost.find('@') + 1 - proxyhost = proxyhost[i:] - user, passwd = self.get_user_passwd(proxyhost, realm, i) - if not (user or passwd): return None - proxyhost = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + proxyhost - self.proxies['http'] = 'http://' + proxyhost + proxyselector - if data is None: - return self.open(newurl) - else: - return self.open(newurl, data) - - def retry_proxy_https_basic_auth(self, url, realm, data=None): - host, selector = splithost(url) - newurl = 'https://' + host + selector - proxy = self.proxies['https'] - urltype, proxyhost = splittype(proxy) - proxyhost, proxyselector = splithost(proxyhost) - i = proxyhost.find('@') + 1 - proxyhost = proxyhost[i:] - user, passwd = self.get_user_passwd(proxyhost, realm, i) - if not (user or passwd): return None - proxyhost = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + proxyhost - self.proxies['https'] = 'https://' + proxyhost + 
proxyselector - if data is None: - return self.open(newurl) - else: - return self.open(newurl, data) - - def retry_http_basic_auth(self, url, realm, data=None): - host, selector = splithost(url) - i = host.find('@') + 1 - host = host[i:] - user, passwd = self.get_user_passwd(host, realm, i) - if not (user or passwd): return None - host = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + host - newurl = 'http://' + host + selector - if data is None: - return self.open(newurl) - else: - return self.open(newurl, data) - - def retry_https_basic_auth(self, url, realm, data=None): - host, selector = splithost(url) - i = host.find('@') + 1 - host = host[i:] - user, passwd = self.get_user_passwd(host, realm, i) - if not (user or passwd): return None - host = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + host - newurl = 'https://' + host + selector - if data is None: - return self.open(newurl) - else: - return self.open(newurl, data) - - def get_user_passwd(self, host, realm, clear_cache = 0): - key = realm + '@' + host.lower() - if key in self.auth_cache: - if clear_cache: - del self.auth_cache[key] - else: - return self.auth_cache[key] - user, passwd = self.prompt_user_passwd(host, realm) - if user or passwd: self.auth_cache[key] = (user, passwd) - return user, passwd - - def prompt_user_passwd(self, host, realm): - """Override this in a GUI environment!""" - import getpass - try: - user = raw_input("Enter username for %s at %s: " % (realm, - host)) - passwd = getpass.getpass("Enter password for %s in %s at %s: " % - (user, realm, host)) - return user, passwd - except KeyboardInterrupt: - print - return None, None - - -# Utility functions - -_localhost = None -def localhost(): - """Return the IP address of the magic hostname 'localhost'.""" - global _localhost - if _localhost is None: - _localhost = socket.gethostbyname('localhost') - return _localhost - -_thishost = None -def thishost(): - """Return the IP address of the current host.""" - global _thishost - if _thishost is None: - _thishost = socket.gethostbyname(socket.gethostname()) - return _thishost - -_ftperrors = None -def ftperrors(): - """Return the set of errors raised by the FTP class.""" - global _ftperrors - if _ftperrors is None: - import ftplib - _ftperrors = ftplib.all_errors - return _ftperrors - -_noheaders = None -def noheaders(): - """Return an empty mimetools.Message object.""" - global _noheaders - if _noheaders is None: - import mimetools - try: - from cStringIO import StringIO - except ImportError: - from StringIO import StringIO - _noheaders = mimetools.Message(StringIO(), 0) - _noheaders.fp.close() # Recycle file descriptor - return _noheaders - - -# Utility classes - -class ftpwrapper: - """Class used by open_ftp() for cache of open FTP connections.""" - - def __init__(self, user, passwd, host, port, dirs, - timeout=socket._GLOBAL_DEFAULT_TIMEOUT): - self.user = user - self.passwd = passwd - self.host = host - self.port = port - self.dirs = dirs - self.timeout = timeout - self.init() - - def init(self): - import ftplib - self.busy = 0 - self.ftp = ftplib.FTP() - self.ftp.connect(self.host, self.port, self.timeout) - self.ftp.login(self.user, self.passwd) - for dir in self.dirs: - self.ftp.cwd(dir) - - def retrfile(self, file, type): - import ftplib - self.endtransfer() - if type in ('d', 'D'): cmd = 'TYPE A'; isdir = 1 - else: cmd = 'TYPE ' + type; isdir = 0 - try: - self.ftp.voidcmd(cmd) - except ftplib.all_errors: - self.init() - self.ftp.voidcmd(cmd) - conn = None - if file and not isdir: - # 
Try to retrieve as a file
-            try:
-                cmd = 'RETR ' + file
-                conn = self.ftp.ntransfercmd(cmd)
-            except ftplib.error_perm, reason:
-                if str(reason)[:3] != '550':
-                    raise IOError, ('ftp error', reason), sys.exc_info()[2]
-        if not conn:
-            # Set transfer mode to ASCII!
-            self.ftp.voidcmd('TYPE A')
-            # Try a directory listing. Verify that directory exists.
-            if file:
-                pwd = self.ftp.pwd()
-                try:
-                    try:
-                        self.ftp.cwd(file)
-                    except ftplib.error_perm, reason:
-                        raise IOError, ('ftp error', reason), sys.exc_info()[2]
-                finally:
-                    self.ftp.cwd(pwd)
-                cmd = 'LIST ' + file
-            else:
-                cmd = 'LIST'
-            conn = self.ftp.ntransfercmd(cmd)
-        self.busy = 1
-        # Pass back both a suitably decorated object and a retrieval length
-        return (addclosehook(conn[0].makefile('rb'),
-                             self.endtransfer), conn[1])
-    def endtransfer(self):
-        if not self.busy:
-            return
-        self.busy = 0
-        try:
-            self.ftp.voidresp()
-        except ftperrors():
-            pass
-
-    def close(self):
-        self.endtransfer()
-        try:
-            self.ftp.close()
-        except ftperrors():
-            pass
-
-class addbase:
-    """Base class for addinfo and addclosehook."""
-
-    def __init__(self, fp):
-        self.fp = fp
-        self.read = self.fp.read
-        self.readline = self.fp.readline
-        if hasattr(self.fp, "readlines"): self.readlines = self.fp.readlines
-        if hasattr(self.fp, "fileno"):
-            self.fileno = self.fp.fileno
-        else:
-            self.fileno = lambda: None
-        if hasattr(self.fp, "__iter__"):
-            self.__iter__ = self.fp.__iter__
-            if hasattr(self.fp, "next"):
-                self.next = self.fp.next
-
-    def __repr__(self):
-        return '<%s at %r whose fp = %r>' % (self.__class__.__name__,
-                                             id(self), self.fp)
-
-    def close(self):
-        self.read = None
-        self.readline = None
-        self.readlines = None
-        self.fileno = None
-        if self.fp: self.fp.close()
-        self.fp = None
-
-class addclosehook(addbase):
-    """Class to add a close hook to an open file."""
-
-    def __init__(self, fp, closehook, *hookargs):
-        addbase.__init__(self, fp)
-        self.closehook = closehook
-        self.hookargs = hookargs
-
-    def close(self):
-        addbase.close(self)
-        if self.closehook:
-            self.closehook(*self.hookargs)
-            self.closehook = None
-            self.hookargs = None
-
-class addinfo(addbase):
-    """class to add an info() method to an open file."""
-
-    def __init__(self, fp, headers):
-        addbase.__init__(self, fp)
-        self.headers = headers
-
-    def info(self):
-        return self.headers
-
-class addinfourl(addbase):
-    """class to add info() and geturl() methods to an open file."""
-
-    def __init__(self, fp, headers, url, code=None):
-        addbase.__init__(self, fp)
-        self.headers = headers
-        self.url = url
-        self.code = code
-
-    def info(self):
-        return self.headers
-
-    def getcode(self):
-        return self.code
-
-    def geturl(self):
-        return self.url
-
-
-# Utilities to parse URLs (most of these return None for missing parts):
-# unwrap('<URL:type://host/path>') --> 'type://host/path'
-# splittype('type:opaquestring') --> 'type', 'opaquestring'
-# splithost('//host[:port]/path') --> 'host[:port]', '/path'
-# splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'
-# splitpasswd('user:passwd') -> 'user', 'passwd'
-# splitport('host:port') --> 'host', 'port'
-# splitquery('/path?query') --> '/path', 'query'
-# splittag('/path#tag') --> '/path', 'tag'
-# splitattr('/path;attr1=value1;attr2=value2;...') ->
-#   '/path', ['attr1=value1', 'attr2=value2', ...]
-# splitvalue('attr=value') --> 'attr', 'value'
-# unquote('abc%20def') -> 'abc def'
-# quote('abc def') -> 'abc%20def'
-
-try:
-    unicode
-except NameError:
-    def _is_unicode(x):
-        return 0
-else:
-    def _is_unicode(x):
-        return isinstance(x, unicode)
-
-def toBytes(url):
-    """toBytes(u"URL") --> 'URL'."""
-    # Most URL schemes require ASCII. If that changes, the conversion
-    # can be relaxed
-    if _is_unicode(url):
-        try:
-            url = url.encode("ASCII")
-        except UnicodeError:
-            raise UnicodeError("URL " + repr(url) +
-                               " contains non-ASCII characters")
-    return url
-
-def unwrap(url):
-    """unwrap('<URL:type://host/path>') --> 'type://host/path'."""
-    url = url.strip()
-    if url[:1] == '<' and url[-1:] == '>':
-        url = url[1:-1].strip()
-    if url[:4] == 'URL:': url = url[4:].strip()
-    return url
-
-_typeprog = None
-def splittype(url):
-    """splittype('type:opaquestring') --> 'type', 'opaquestring'."""
-    global _typeprog
-    if _typeprog is None:
-        import re
-        _typeprog = re.compile('^([^/:]+):')
-
-    match = _typeprog.match(url)
-    if match:
-        scheme = match.group(1)
-        return scheme.lower(), url[len(scheme) + 1:]
-    return None, url
-
-_hostprog = None
-def splithost(url):
-    """splithost('//host[:port]/path') --> 'host[:port]', '/path'."""
-    global _hostprog
-    if _hostprog is None:
-        import re
-        _hostprog = re.compile('^//([^/?]*)(.*)$')
-
-    match = _hostprog.match(url)
-    if match: return match.group(1, 2)
-    return None, url
-
-_userprog = None
-def splituser(host):
-    """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
-    global _userprog
-    if _userprog is None:
-        import re
-        _userprog = re.compile('^(.*)@(.*)$')
-
-    match = _userprog.match(host)
-    if match: return map(unquote, match.group(1, 2))
-    return None, host
-
-_passwdprog = None
-def splitpasswd(user):
-    """splitpasswd('user:passwd') -> 'user', 'passwd'."""
-    global _passwdprog
-    if _passwdprog is None:
-        import re
-        _passwdprog = re.compile('^([^:]*):(.*)$')
-
-    match = _passwdprog.match(user)
-    if match: return match.group(1, 2)
-    return user, None
-
-# splittag('/path#tag') --> '/path', 'tag'
-_portprog = None
-def splitport(host):
-    """splitport('host:port') --> 'host', 'port'."""
-    global _portprog
-    if _portprog is None:
-        import re
-        _portprog = re.compile('^(.*):([0-9]+)$')
-
-    match = _portprog.match(host)
-    if match: return match.group(1, 2)
-    return host, None
-
-_nportprog = None
-def splitnport(host, defport=-1):
-    """Split host and port, returning numeric port.
-    Return given default port if no ':' found; defaults to -1.
-    Return numerical port if a valid number is found after ':'.
- Return None if ':' but not a valid number.""" - global _nportprog - if _nportprog is None: - import re - _nportprog = re.compile('^(.*):(.*)$') - - match = _nportprog.match(host) - if match: - host, port = match.group(1, 2) - try: - if not port: raise ValueError, "no digits" - nport = int(port) - except ValueError: - nport = None - return host, nport - return host, defport - -_queryprog = None -def splitquery(url): - """splitquery('/path?query') --> '/path', 'query'.""" - global _queryprog - if _queryprog is None: - import re - _queryprog = re.compile('^(.*)\?([^?]*)$') - - match = _queryprog.match(url) - if match: return match.group(1, 2) - return url, None - -_tagprog = None -def splittag(url): - """splittag('/path#tag') --> '/path', 'tag'.""" - global _tagprog - if _tagprog is None: - import re - _tagprog = re.compile('^(.*)#([^#]*)$') - - match = _tagprog.match(url) - if match: return match.group(1, 2) - return url, None - -def splitattr(url): - """splitattr('/path;attr1=value1;attr2=value2;...') -> - '/path', ['attr1=value1', 'attr2=value2', ...].""" - words = url.split(';') - return words[0], words[1:] - -_valueprog = None -def splitvalue(attr): - """splitvalue('attr=value') --> 'attr', 'value'.""" - global _valueprog - if _valueprog is None: - import re - _valueprog = re.compile('^([^=]*)=(.*)$') - - match = _valueprog.match(attr) - if match: return match.group(1, 2) - return attr, None - -_hextochr = dict(('%02x' % i, chr(i)) for i in range(256)) -_hextochr.update(('%02X' % i, chr(i)) for i in range(256)) - -def unquote(s): - """unquote('abc%20def') -> 'abc def'.""" - res = s.split('%') - for i in xrange(1, len(res)): - item = res[i] - try: - res[i] = _hextochr[item[:2]] + item[2:] - except KeyError: - res[i] = '%' + item - except UnicodeDecodeError: - res[i] = unichr(int(item[:2], 16)) + item[2:] - return "".join(res) - -def unquote_plus(s): - """unquote('%7e/abc+def') -> '~/abc def'""" - s = s.replace('+', ' ') - return unquote(s) - -always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ' - 'abcdefghijklmnopqrstuvwxyz' - '0123456789' '_.-') -_safemaps = {} - -def quote(s, safe = '/'): - """quote('abc def') -> 'abc%20def' - - Each part of a URL, e.g. the path info, the query, etc., has a - different set of reserved characters that must be quoted. - - RFC 2396 Uniform Resource Identifiers (URI): Generic Syntax lists - the following reserved characters. - - reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" | - "$" | "," - - Each of these characters is reserved in some component of a URL, - but not necessarily in all of them. - - By default, the quote function is intended for quoting the path - section of a URL. Thus, it will not encode '/'. This character - is reserved, but in typical usage the quote function is being - called on a path where the existing slash characters are used as - reserved characters. - """ - cachekey = (safe, always_safe) - try: - safe_map = _safemaps[cachekey] - except KeyError: - safe += always_safe - safe_map = {} - for i in range(256): - c = chr(i) - safe_map[c] = (c in safe) and c or ('%%%02X' % i) - _safemaps[cachekey] = safe_map - res = map(safe_map.__getitem__, s) - return ''.join(res) - -def quote_plus(s, safe = ''): - """Quote the query fragment of a URL; replacing ' ' with '+'""" - if ' ' in s: - s = quote(s, safe + ' ') - return s.replace(' ', '+') - return quote(s, safe) - -def urlencode(query,doseq=0): - """Encode a sequence of two-element tuples or dictionary into a URL query string. 
-
-    If any values in the query arg are sequences and doseq is true, each
-    sequence element is converted to a separate parameter.
-
-    If the query arg is a sequence of two-element tuples, the order of the
-    parameters in the output will match the order of parameters in the
-    input.
-    """
-
-    if hasattr(query,"items"):
-        # mapping objects
-        query = query.items()
-    else:
-        # it's a bother at times that strings and string-like objects are
-        # sequences...
-        try:
-            # non-sequence items should not work with len()
-            # non-empty strings will fail this
-            if len(query) and not isinstance(query[0], tuple):
-                raise TypeError
-            # zero-length sequences of all types will get here and succeed,
-            # but that's a minor nit - since the original implementation
-            # allowed empty dicts that type of behavior probably should be
-            # preserved for consistency
-        except TypeError:
-            ty,va,tb = sys.exc_info()
-            raise TypeError, "not a valid non-string sequence or mapping object", tb
-
-    l = []
-    if not doseq:
-        # preserve old behavior
-        for k, v in query:
-            k = quote_plus(str(k))
-            v = quote_plus(str(v))
-            l.append(k + '=' + v)
-    else:
-        for k, v in query:
-            k = quote_plus(str(k))
-            if isinstance(v, str):
-                v = quote_plus(v)
-                l.append(k + '=' + v)
-            elif _is_unicode(v):
-                # is there a reasonable way to convert to ASCII?
-                # encode generates a string, but "replace" or "ignore"
-                # lose information and "strict" can raise UnicodeError
-                v = quote_plus(v.encode("ASCII","replace"))
-                l.append(k + '=' + v)
-            else:
-                try:
-                    # is this a sufficient test for sequence-ness?
-                    x = len(v)
-                except TypeError:
-                    # not a sequence
-                    v = quote_plus(str(v))
-                    l.append(k + '=' + v)
-                else:
-                    # loop over the sequence
-                    for elt in v:
-                        l.append(k + '=' + quote_plus(str(elt)))
-    return '&'.join(l)
-
-# Proxy handling
-def getproxies_environment():
-    """Return a dictionary of scheme -> proxy server URL mappings.
-
-    Scan the environment for variables named <scheme>_proxy;
-    this seems to be the standard convention.  If you need a
-    different way, you can pass a proxies dictionary to the
-    [Fancy]URLopener constructor.
-
-    """
-    proxies = {}
-    for name, value in os.environ.items():
-        name = name.lower()
-        if value and name[-6:] == '_proxy':
-            proxies[name[:-6]] = value
-    return proxies
-
-def proxy_bypass_environment(host):
-    """Test if proxies should not be used for a particular host.
-
-    Checks the environment for a variable named no_proxy, which should
-    be a list of DNS suffixes separated by commas, or '*' for all hosts.
-    """
-    no_proxy = os.environ.get('no_proxy', '') or os.environ.get('NO_PROXY', '')
-    # '*' is special case for always bypass
-    if no_proxy == '*':
-        return 1
-    # strip port off host
-    hostonly, port = splitport(host)
-    # check if the host ends with any of the DNS suffixes
-    for name in no_proxy.split(','):
-        if name and (hostonly.endswith(name) or host.endswith(name)):
-            return 1
-    # otherwise, don't bypass
-    return 0
-
-
-if sys.platform == 'darwin':
-    from _scproxy import _get_proxy_settings, _get_proxies
-
-    def proxy_bypass_macosx_sysconf(host):
-        """
-        Return True iff this host shouldn't be accessed using a proxy
-
-        This function uses the MacOSX framework SystemConfiguration
-        to fetch the proxy information.
- """ - import re - import socket - from fnmatch import fnmatch - - hostonly, port = splitport(host) - - def ip2num(ipAddr): - parts = ipAddr.split('.') - parts = map(int, parts) - if len(parts) != 4: - parts = (parts + [0, 0, 0, 0])[:4] - return (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8) | parts[3] - - proxy_settings = _get_proxy_settings() - - # Check for simple host names: - if '.' not in host: - if proxy_settings['exclude_simple']: - return True - - hostIP = None - - for value in proxy_settings.get('exceptions', ()): - # Items in the list are strings like these: *.local, 169.254/16 - if not value: continue - - m = re.match(r"(\d+(?:\.\d+)*)(/\d+)?", value) - if m is not None: - if hostIP is None: - try: - hostIP = socket.gethostbyname(hostonly) - hostIP = ip2num(hostIP) - except socket.error: - continue - - base = ip2num(m.group(1)) - mask = int(m.group(2)[1:]) - mask = 32 - mask - - if (hostIP >> mask) == (base >> mask): - return True - - elif fnmatch(host, value): - return True - - return False - - - def getproxies_macosx_sysconf(): - """Return a dictionary of scheme -> proxy server URL mappings. - - This function uses the MacOSX framework SystemConfiguration - to fetch the proxy information. - """ - return _get_proxies() - - - - def proxy_bypass(host): - if getproxies_environment(): - return proxy_bypass_environment(host) - else: - return proxy_bypass_macosx_sysconf(host) - - def getproxies(): - return getproxies_environment() or getproxies_macosx_sysconf() - -elif os.name == 'nt': - def getproxies_registry(): - """Return a dictionary of scheme -> proxy server URL mappings. - - Win32 uses the registry to store proxies. - - """ - proxies = {} - try: - import _winreg - except ImportError: - # Std module, so should be around - but you never know! - return proxies - try: - internetSettings = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, - r'Software\Microsoft\Windows\CurrentVersion\Internet Settings') - proxyEnable = _winreg.QueryValueEx(internetSettings, - 'ProxyEnable')[0] - if proxyEnable: - # Returned as Unicode but problems if not converted to ASCII - proxyServer = str(_winreg.QueryValueEx(internetSettings, - 'ProxyServer')[0]) - if '=' in proxyServer: - # Per-protocol settings - for p in proxyServer.split(';'): - protocol, address = p.split('=', 1) - # See if address has a type:// prefix - import re - if not re.match('^([^/:]+)://', address): - address = '%s://%s' % (protocol, address) - proxies[protocol] = address - else: - # Use one setting for all protocols - if proxyServer[:5] == 'http:': - proxies['http'] = proxyServer - else: - proxies['http'] = 'http://%s' % proxyServer - proxies['ftp'] = 'ftp://%s' % proxyServer - internetSettings.Close() - except (WindowsError, ValueError, TypeError): - # Either registry key not found etc, or the value in an - # unexpected format. - # proxies already set up to be empty so nothing to do - pass - return proxies - - def getproxies(): - """Return a dictionary of scheme -> proxy server URL mappings. - - Returns settings gathered from the environment, if specified, - or the registry. - - """ - return getproxies_environment() or getproxies_registry() - - def proxy_bypass_registry(host): - try: - import _winreg - import re - except ImportError: - # Std modules, so should be around - but you never know! 
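-            # Without _winreg (or re) we cannot consult ProxyOverride,
-            # so conservatively report that the proxy is not bypassed.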
-            return 0
-        try:
-            internetSettings = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
-                r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
-            proxyEnable = _winreg.QueryValueEx(internetSettings,
-                                               'ProxyEnable')[0]
-            proxyOverride = str(_winreg.QueryValueEx(internetSettings,
-                                                     'ProxyOverride')[0])
-            # ^^^^ Returned as Unicode but problems if not converted to ASCII
-        except WindowsError:
-            return 0
-        if not proxyEnable or not proxyOverride:
-            return 0
-        # try to make a host list from name and IP address.
-        rawHost, port = splitport(host)
-        host = [rawHost]
-        try:
-            addr = socket.gethostbyname(rawHost)
-            if addr != rawHost:
-                host.append(addr)
-        except socket.error:
-            pass
-        try:
-            fqdn = socket.getfqdn(rawHost)
-            if fqdn != rawHost:
-                host.append(fqdn)
-        except socket.error:
-            pass
-        # make a check value list from the registry entry: replace the
-        # '<local>' string by the localhost entry and the corresponding
-        # canonical entry.
-        proxyOverride = proxyOverride.split(';')
-        i = 0
-        while i < len(proxyOverride):
-            if proxyOverride[i] == '<local>':
-                proxyOverride[i:i+1] = ['localhost',
-                                        '127.0.0.1',
-                                        socket.gethostname(),
-                                        socket.gethostbyname(
-                                            socket.gethostname())]
-            i += 1
-        # print proxyOverride
-        # now check if we match one of the registry values.
-        for test in proxyOverride:
-            test = test.replace(".", r"\.")     # mask dots
-            test = test.replace("*", r".*")     # change glob sequence
-            test = test.replace("?", r".")      # change glob char
-            for val in host:
-                # print "%s <--> %s" %( test, val )
-                if re.match(test, val, re.I):
-                    return 1
-        return 0
-
-    def proxy_bypass(host):
-        """Return a dictionary of scheme -> proxy server URL mappings.
-
-        Returns settings gathered from the environment, if specified,
-        or the registry.
-
-        """
-        if getproxies_environment():
-            return proxy_bypass_environment(host)
-        else:
-            return proxy_bypass_registry(host)
-
-else:
-    # By default use environment variables
-    getproxies = getproxies_environment
-    proxy_bypass = proxy_bypass_environment
-
-# Test and time quote() and unquote()
-def test1():
-    s = ''
-    for i in range(256): s = s + chr(i)
-    s = s*4
-    t0 = time.time()
-    qs = quote(s)
-    uqs = unquote(qs)
-    t1 = time.time()
-    if uqs != s:
-        print 'Wrong!'
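-        # Dump the original, quoted, and round-tripped strings to locate the mismatch.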
- print repr(s) - print repr(qs) - print repr(uqs) - print round(t1 - t0, 3), 'sec' - - -def reporthook(blocknum, blocksize, totalsize): - # Report during remote transfers - print "Block number: %d, Block size: %d, Total size: %d" % ( - blocknum, blocksize, totalsize) - -# Test program -def test(args=[]): - if not args: - args = [ - '/etc/passwd', - 'file:/etc/passwd', - 'file://localhost/etc/passwd', - 'ftp://ftp.gnu.org/pub/README', - 'http://www.python.org/index.html', - ] - if hasattr(URLopener, "open_https"): - args.append('https://synergy.as.cmu.edu/~geek/') - try: - for url in args: - print '-'*10, url, '-'*10 - fn, h = urlretrieve(url, None, reporthook) - print fn - if h: - print '======' - for k in h.keys(): print k + ':', h[k] - print '======' - fp = open(fn, 'rb') - data = fp.read() - del fp - if '\r' in data: - table = string.maketrans("", "") - data = data.translate(table, "\r") - print data - fn, h = None, None - print '-'*40 - finally: - urlcleanup() - -def main(): - import getopt, sys - try: - opts, args = getopt.getopt(sys.argv[1:], "th") - except getopt.error, msg: - print msg - print "Use -h for help" - return - t = 0 - for o, a in opts: - if o == '-t': - t = t + 1 - if o == '-h': - print "Usage: python urllib.py [-t] [url ...]" - print "-t runs self-test;", - print "otherwise, contents of urls are printed" - return - if t: - if t > 1: - test1() - test(args) - else: - if not args: - print "Use -h for help" - for url in args: - print urlopen(url).read(), - -# Run test program when run as a script -if __name__ == '__main__': - main() diff --git a/icarus-miner/data/usr/lib/python2.6/urllib.pyc b/icarus-miner/data/usr/lib/python2.6/urllib.pyc deleted file mode 100644 index d991b01..0000000 Binary files a/icarus-miner/data/usr/lib/python2.6/urllib.pyc and /dev/null differ diff --git a/icarus-miner/data/usr/lib/python2.6/urlparse.py b/icarus-miner/data/usr/lib/python2.6/urlparse.py deleted file mode 100644 index c56d883..0000000 --- a/icarus-miner/data/usr/lib/python2.6/urlparse.py +++ /dev/null @@ -1,423 +0,0 @@ -"""Parse (absolute and relative) URLs. - -See RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, -UC Irvine, June 1995. 
-"""
-
-__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
-           "urlsplit", "urlunsplit", "parse_qs", "parse_qsl"]
-
-# A classification of schemes ('' means apply by default)
-uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap',
-                 'wais', 'file', 'https', 'shttp', 'mms',
-                 'prospero', 'rtsp', 'rtspu', '', 'sftp']
-uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet',
-               'imap', 'wais', 'file', 'mms', 'https', 'shttp',
-               'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',
-               'svn', 'svn+ssh', 'sftp']
-non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
-                    'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']
-uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',
-               'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
-               'mms', '', 'sftp']
-uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms',
-              'gopher', 'rtsp', 'rtspu', 'sip', 'sips', '']
-uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news',
-                 'nntp', 'wais', 'https', 'shttp', 'snews',
-                 'file', 'prospero', '']
-
-# Characters valid in scheme names
-scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
-                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
-                '0123456789'
-                '+-.')
-
-MAX_CACHE_SIZE = 20
-_parse_cache = {}
-
-def clear_cache():
-    """Clear the parse cache."""
-    _parse_cache.clear()
-
-
-class ResultMixin(object):
-    """Shared methods for the parsed result objects."""
-
-    @property
-    def username(self):
-        netloc = self.netloc
-        if "@" in netloc:
-            userinfo = netloc.rsplit("@", 1)[0]
-            if ":" in userinfo:
-                userinfo = userinfo.split(":", 1)[0]
-            return userinfo
-        return None
-
-    @property
-    def password(self):
-        netloc = self.netloc
-        if "@" in netloc:
-            userinfo = netloc.rsplit("@", 1)[0]
-            if ":" in userinfo:
-                return userinfo.split(":", 1)[1]
-        return None
-
-    @property
-    def hostname(self):
-        netloc = self.netloc
-        if "@" in netloc:
-            netloc = netloc.rsplit("@", 1)[1]
-        if ":" in netloc:
-            netloc = netloc.split(":", 1)[0]
-        return netloc.lower() or None
-
-    @property
-    def port(self):
-        netloc = self.netloc
-        if "@" in netloc:
-            netloc = netloc.rsplit("@", 1)[1]
-        if ":" in netloc:
-            port = netloc.split(":", 1)[1]
-            return int(port, 10)
-        return None
-
-from collections import namedtuple
-
-class SplitResult(namedtuple('SplitResult', 'scheme netloc path query fragment'), ResultMixin):
-
-    __slots__ = ()
-
-    def geturl(self):
-        return urlunsplit(self)
-
-
-class ParseResult(namedtuple('ParseResult', 'scheme netloc path params query fragment'), ResultMixin):
-
-    __slots__ = ()
-
-    def geturl(self):
-        return urlunparse(self)
-
-
-def urlparse(url, scheme='', allow_fragments=True):
-    """Parse a URL into 6 components:
-    <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
-    Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
-    Note that we don't break the components up in smaller bits
-    (e.g. netloc is a single string) and we don't expand % escapes."""
-    tuple = urlsplit(url, scheme, allow_fragments)
-    scheme, netloc, url, query, fragment = tuple
-    if scheme in uses_params and ';' in url:
-        url, params = _splitparams(url)
-    else:
-        params = ''
-    return ParseResult(scheme, netloc, url, params, query, fragment)
-
-def _splitparams(url):
-    if '/' in url:
-        i = url.find(';', url.rfind('/'))
-        if i < 0:
-            return url, ''
-    else:
-        i = url.find(';')
-    return url[:i], url[i+1:]
-
-def _splitnetloc(url, start=0):
-    delim = len(url)   # position of end of domain part of url, default is end
-    for c in '/?#':    # look for delimiters; the order is NOT important
-        wdelim = url.find(c, start)        # find first of this delim
-        if wdelim >= 0:                    # if found
-            delim = min(delim, wdelim)     # use earliest delim position
-    return url[start:delim], url[delim:]   # return (domain, rest)
-
-def urlsplit(url, scheme='', allow_fragments=True):
-    """Parse a URL into 5 components:
-    <scheme>://<netloc>/<path>?<query>#<fragment>
-    Return a 5-tuple: (scheme, netloc, path, query, fragment).
-    Note that we don't break the components up in smaller bits
-    (e.g. netloc is a single string) and we don't expand % escapes."""
-    allow_fragments = bool(allow_fragments)
-    key = url, scheme, allow_fragments, type(url), type(scheme)
-    cached = _parse_cache.get(key, None)
-    if cached:
-        return cached
-    if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
-        clear_cache()
-    netloc = query = fragment = ''
-    i = url.find(':')
-    if i > 0:
-        if url[:i] == 'http': # optimize the common case
-            scheme = url[:i].lower()
-            url = url[i+1:]
-            if url[:2] == '//':
-                netloc, url = _splitnetloc(url, 2)
-            if allow_fragments and '#' in url:
-                url, fragment = url.split('#', 1)
-            if '?' in url:
-                url, query = url.split('?', 1)
-            v = SplitResult(scheme, netloc, url, query, fragment)
-            _parse_cache[key] = v
-            return v
-        for c in url[:i]:
-            if c not in scheme_chars:
-                break
-        else:
-            scheme, url = url[:i].lower(), url[i+1:]
-    if scheme in uses_netloc and url[:2] == '//':
-        netloc, url = _splitnetloc(url, 2)
-    if allow_fragments and scheme in uses_fragment and '#' in url:
-        url, fragment = url.split('#', 1)
-    if scheme in uses_query and '?' in url:
-        url, query = url.split('?', 1)
-    v = SplitResult(scheme, netloc, url, query, fragment)
-    _parse_cache[key] = v
-    return v
-
-def urlunparse(data):
-    """Put a parsed URL back together again.  This may result in a
-    slightly different, but equivalent URL, if the URL that was parsed
-    originally had redundant delimiters, e.g. a ? with an empty query
-    (the draft states that these are equivalent)."""
-    scheme, netloc, url, params, query, fragment = data
-    if params:
-        url = "%s;%s" % (url, params)
-    return urlunsplit((scheme, netloc, url, query, fragment))
-
-def urlunsplit(data):
-    scheme, netloc, url, query, fragment = data
-    if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):
-        if url and url[:1] != '/': url = '/' + url
-        url = '//' + (netloc or '') + url
-    if scheme:
-        url = scheme + ':' + url
-    if query:
-        url = url + '?'
+ query - if fragment: - url = url + '#' + fragment - return url - -def urljoin(base, url, allow_fragments=True): - """Join a base URL and a possibly relative URL to form an absolute - interpretation of the latter.""" - if not base: - return url - if not url: - return base - bscheme, bnetloc, bpath, bparams, bquery, bfragment = \ - urlparse(base, '', allow_fragments) - scheme, netloc, path, params, query, fragment = \ - urlparse(url, bscheme, allow_fragments) - if scheme != bscheme or scheme not in uses_relative: - return url - if scheme in uses_netloc: - if netloc: - return urlunparse((scheme, netloc, path, - params, query, fragment)) - netloc = bnetloc - if path[:1] == '/': - return urlunparse((scheme, netloc, path, - params, query, fragment)) - if not path: - path = bpath - if not params: - params = bparams - else: - path = path[:-1] - return urlunparse((scheme, netloc, path, - params, query, fragment)) - if not query: - query = bquery - return urlunparse((scheme, netloc, path, - params, query, fragment)) - segments = bpath.split('/')[:-1] + path.split('/') - # XXX The stuff below is bogus in various ways... - if segments[-1] == '.': - segments[-1] = '' - while '.' in segments: - segments.remove('.') - while 1: - i = 1 - n = len(segments) - 1 - while i < n: - if (segments[i] == '..' - and segments[i-1] not in ('', '..')): - del segments[i-1:i+1] - break - i = i+1 - else: - break - if segments == ['', '..']: - segments[-1] = '' - elif len(segments) >= 2 and segments[-1] == '..': - segments[-2:] = [''] - return urlunparse((scheme, netloc, '/'.join(segments), - params, query, fragment)) - -def urldefrag(url): - """Removes any existing fragment from URL. - - Returns a tuple of the defragmented URL and the fragment. If - the URL contained no fragments, the second element is the - empty string. - """ - if '#' in url: - s, n, p, a, q, frag = urlparse(url) - defrag = urlunparse((s, n, p, a, q, '')) - return defrag, frag - else: - return url, '' - -# unquote method for parse_qs and parse_qsl -# Cannot use directly from urllib as it would create circular reference. -# urllib uses urlparse methods ( urljoin) - -_hextochr = dict(('%02x' % i, chr(i)) for i in range(256)) -_hextochr.update(('%02X' % i, chr(i)) for i in range(256)) - -def unquote(s): - """unquote('abc%20def') -> 'abc def'.""" - res = s.split('%') - for i in xrange(1, len(res)): - item = res[i] - try: - res[i] = _hextochr[item[:2]] + item[2:] - except KeyError: - res[i] = '%' + item - except UnicodeDecodeError: - res[i] = unichr(int(item[:2], 16)) + item[2:] - return "".join(res) - -def parse_qs(qs, keep_blank_values=0, strict_parsing=0): - """Parse a query given as a string argument. - - Arguments: - - qs: URL-encoded query string to be parsed - - keep_blank_values: flag indicating whether blank values in - URL encoded queries should be treated as blank strings. - A true value indicates that blanks should be retained as - blank strings. The default false value indicates that - blank values are to be ignored and treated as if they were - not included. - - strict_parsing: flag indicating what to do with parsing errors. - If false (the default), errors are silently ignored. - If true, errors raise a ValueError exception. - """ - dict = {} - for name, value in parse_qsl(qs, keep_blank_values, strict_parsing): - if name in dict: - dict[name].append(value) - else: - dict[name] = [value] - return dict - -def parse_qsl(qs, keep_blank_values=0, strict_parsing=0): - """Parse a query given as a string argument. 
-
-    Arguments:
-
-    qs: URL-encoded query string to be parsed
-
-    keep_blank_values: flag indicating whether blank values in
-        URL encoded queries should be treated as blank strings.  A
-        true value indicates that blanks should be retained as blank
-        strings.  The default false value indicates that blank values
-        are to be ignored and treated as if they were not included.
-
-    strict_parsing: flag indicating what to do with parsing errors. If
-        false (the default), errors are silently ignored. If true,
-        errors raise a ValueError exception.
-
-    Returns a list, as G-d intended.
-    """
-    pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
-    r = []
-    for name_value in pairs:
-        if not name_value and not strict_parsing:
-            continue
-        nv = name_value.split('=', 1)
-        if len(nv) != 2:
-            if strict_parsing:
-                raise ValueError, "bad query field: %r" % (name_value,)
-            # Handle case of a control-name with no equal sign
-            if keep_blank_values:
-                nv.append('')
-            else:
-                continue
-        if len(nv[1]) or keep_blank_values:
-            name = unquote(nv[0].replace('+', ' '))
-            value = unquote(nv[1].replace('+', ' '))
-            r.append((name, value))
-
-    return r
-
-
-test_input = """
-      http://a/b/c/d
-
-      g:h        = <URL:g:h>
-      http:g     = <URL:http://a/b/c/g>
-      http:      = <URL:http://a/b/c/d>
-      g          = <URL:http://a/b/c/g>
-      ./g        = <URL:http://a/b/c/g>
-      g/         = <URL:http://a/b/c/g/>
-      /g         = <URL:http://a/g>
-      //g        = <URL:http://g>
-      ?y         = <URL:http://a/b/c/d?y>
-      g?y        = <URL:http://a/b/c/g?y>
-      g?y/./x    = <URL:http://a/b/c/g?y/./x>
-      .          = <URL:http://a/b/c/>
-      ./         = <URL:http://a/b/c/>
-      ..         = <URL:http://a/b/>
-      ../        = <URL:http://a/b/>
-      ../g       = <URL:http://a/b/g>
-      ../..      = <URL:http://a/>
-      ../../g    = <URL:http://a/g>
-      ../../../g = <URL:http://a/../g>
-      ./../g     = <URL:http://a/b/g>
-      ./g/.      = <URL:http://a/b/c/g/>
-      /./g       = <URL:http://a/./g>
-      g/./h      = <URL:http://a/b/c/g/h>
-      g/../h     = <URL:http://a/b/c/h>
-      http:g     = <URL:http://a/b/c/g>
-      http:      = <URL:http://a/b/c/d>
-      http:?y    = <URL:http://a/b/c/d?y>
-      http:g?y   = <URL:http://a/b/c/g?y>
-      http:g?y/./x = <URL:http://a/b/c/g?y/./x>
-"""
-
-def test():
-    import sys
-    base = ''
-    if sys.argv[1:]:
-        fn = sys.argv[1]
-        if fn == '-':
-            fp = sys.stdin
-        else:
-            fp = open(fn)
-    else:
-        try:
-            from cStringIO import StringIO
-        except ImportError:
-            from StringIO import StringIO
-        fp = StringIO(test_input)
-    for line in fp:
-        words = line.split()
-        if not words:
-            continue
-        url = words[0]
-        parts = urlparse(url)
-        print '%-10s : %s' % (url, parts)
-        abs = urljoin(base, url)
-        if not base:
-            base = abs
-        wrapped = '<URL:%s>' % abs
-        print '%-10s = %s' % (url, wrapped)
-        if len(words) == 3 and words[1] == '=':
-            if wrapped != words[2]:
-                print 'EXPECTED', words[2], '!!!!!!!!!!'
-
-if __name__ == '__main__':
-    test()
diff --git a/icarus-miner/data/usr/lib/python2.6/urlparse.pyc b/icarus-miner/data/usr/lib/python2.6/urlparse.pyc
deleted file mode 100644
index 8d3dc28..0000000
Binary files a/icarus-miner/data/usr/lib/python2.6/urlparse.pyc and /dev/null differ
diff --git a/icarus-miner/data/usr/lib/python2.6/warnings.py b/icarus-miner/data/usr/lib/python2.6/warnings.py
deleted file mode 100644
index 59011ca..0000000
--- a/icarus-miner/data/usr/lib/python2.6/warnings.py
+++ /dev/null
@@ -1,402 +0,0 @@
-"""Python part of the warnings subsystem."""
-
-# Note: function level imports should *not* be used
-# in this module as it may cause import lock deadlock.
-# See bug 683658.
-import linecache
-import sys
-import types
-
-__all__ = ["warn", "showwarning", "formatwarning", "filterwarnings",
-           "resetwarnings", "catch_warnings"]
-
-
-def warnpy3k(message, category=None, stacklevel=1):
-    """Issue a deprecation warning for Python 3.x related changes.
-
-    Warnings are omitted unless Python is started with the -3 option.
-    """
-    if sys.py3kwarning:
-        if category is None:
-            category = DeprecationWarning
-        warn(message, category, stacklevel+1)
-
-def _show_warning(message, category, filename, lineno, file=None, line=None):
-    """Hook to write a warning to a file; replace if you like."""
-    if file is None:
-        file = sys.stderr
-    try:
-        file.write(formatwarning(message, category, filename, lineno, line))
-    except IOError:
-        pass # the file (probably stderr) is invalid - this warning gets lost.
-# Keep a working version around in case the deprecation of the old API is
-# triggered.
-showwarning = _show_warning
-
-def formatwarning(message, category, filename, lineno, line=None):
-    """Function to format a warning the standard way."""
-    s = "%s:%s: %s: %s\n" % (filename, lineno, category.__name__, message)
-    line = linecache.getline(filename, lineno) if line is None else line
-    if line:
-        line = line.strip()
-        s += "  %s\n" % line
-    return s
-
-def filterwarnings(action, message="", category=Warning, module="", lineno=0,
-                   append=0):
-    """Insert an entry into the list of warnings filters (at the front).
-
-    Use assertions to check that all arguments have the right type."""
-    import re
-    assert action in ("error", "ignore", "always", "default", "module",
-                      "once"), "invalid action: %r" % (action,)
-    assert isinstance(message, basestring), "message must be a string"
-    assert isinstance(category, (type, types.ClassType)), \
-           "category must be a class"
-    assert issubclass(category, Warning), "category must be a Warning subclass"
-    assert isinstance(module, basestring), "module must be a string"
-    assert isinstance(lineno, int) and lineno >= 0, \
-           "lineno must be an int >= 0"
-    item = (action, re.compile(message, re.I), category,
-            re.compile(module), lineno)
-    if append:
-        filters.append(item)
-    else:
-        filters.insert(0, item)
-
-def simplefilter(action, category=Warning, lineno=0, append=0):
-    """Insert a simple entry into the list of warnings filters (at the front).
-
-    A simple filter matches all modules and messages.
-    """
-    assert action in ("error", "ignore", "always", "default", "module",
-                      "once"), "invalid action: %r" % (action,)
-    assert isinstance(lineno, int) and lineno >= 0, \
-           "lineno must be an int >= 0"
-    item = (action, None, category, None, lineno)
-    if append:
-        filters.append(item)
-    else:
-        filters.insert(0, item)
-
-def resetwarnings():
-    """Clear the list of warning filters, so that no filters are active."""
-    filters[:] = []
-
-class _OptionError(Exception):
-    """Exception used by option processing helpers."""
-    pass
-
-# Helper to process -W options passed via sys.warnoptions
-def _processoptions(args):
-    for arg in args:
-        try:
-            _setoption(arg)
-        except _OptionError, msg:
-            print >>sys.stderr, "Invalid -W option ignored:", msg
-
-# Helper for _processoptions()
-def _setoption(arg):
-    import re
-    parts = arg.split(':')
-    if len(parts) > 5:
-        raise _OptionError("too many fields (max 5): %r" % (arg,))
-    while len(parts) < 5:
-        parts.append('')
-    action, message, category, module, lineno = [s.strip()
-                                                 for s in parts]
-    action = _getaction(action)
-    message = re.escape(message)
-    category = _getcategory(category)
-    module = re.escape(module)
-    if module:
-        module = module + '$'
-    if lineno:
-        try:
-            lineno = int(lineno)
-            if lineno < 0:
-                raise ValueError
-        except (ValueError, OverflowError):
-            raise _OptionError("invalid lineno %r" % (lineno,))
-    else:
-        lineno = 0
-    filterwarnings(action, message, category, module, lineno)
-
-# Helper for _setoption()
-def _getaction(action):
-    if not action:
-        return "default"
-    if action == "all": return "always" # Alias
-    for a in ('default', 'always', 'ignore', 'module', 'once', 'error'):
-        if a.startswith(action):
-            return a
-    raise _OptionError("invalid action: %r" % (action,))
-
-# Helper for _setoption()
-def _getcategory(category):
-    import re
-    if not category:
-        return Warning
-    if re.match("^[a-zA-Z0-9_]+$", category):
-        try:
-            cat = eval(category)
-        except NameError:
-            raise _OptionError("unknown warning category: %r" % (category,))
-    else:
-        i = category.rfind(".")
-        module = category[:i]
-        klass = category[i+1:]
-        try:
-            m = __import__(module, None, None, [klass])
-        except ImportError:
-            raise _OptionError("invalid module name: %r" % (module,))
-        try:
-            cat = getattr(m, klass)
-        except AttributeError:
-            raise _OptionError("unknown warning category: %r" % (category,))
-    if not issubclass(cat, Warning):
-        raise _OptionError("invalid warning category: %r" % (category,))
-    return cat
-
-
-# Code typically replaced by _warnings
-def warn(message, category=None, stacklevel=1):
-    """Issue a warning, or maybe ignore it or raise an exception."""
-    # Check if message is already a Warning object
-    if isinstance(message, Warning):
-        category = message.__class__
-    # Check category argument
-    if category is None:
-        category = UserWarning
-    assert issubclass(category, Warning)
-    # Get context information
-    try:
-        caller = sys._getframe(stacklevel)
-    except ValueError:
-        globals = sys.__dict__
-        lineno = 1
-    else:
-        globals = caller.f_globals
-        lineno = caller.f_lineno
-    if '__name__' in globals:
-        module = globals['__name__']
-    else:
-        module = "<string>"
-    filename = globals.get('__file__')
-    if filename:
-        fnl = filename.lower()
-        if fnl.endswith((".pyc", ".pyo")):
-            filename = filename[:-1]
-    else:
-        if module == "__main__":
-            try:
-                filename = sys.argv[0]
-            except AttributeError:
-                # embedded interpreters don't have sys.argv, see bug #839151
-                filename = '__main__'
-        if not filename:
-            filename = module
-    registry = globals.setdefault("__warningregistry__", {})
-    warn_explicit(message, category, filename, lineno, module, registry,
-                  globals)
-
-def warn_explicit(message, category, filename, lineno,
-                  module=None, registry=None, module_globals=None):
-    lineno = int(lineno)
-    if module is None:
-        module = filename or "<unknown>"
-        if module[-3:].lower() == ".py":
-            module = module[:-3] # XXX What about leading pathname?
-    if registry is None:
-        registry = {}
-    if isinstance(message, Warning):
-        text = str(message)
-        category = message.__class__
-    else:
-        text = message
-        message = category(message)
-    key = (text, category, lineno)
-    # Quick test for common case
-    if registry.get(key):
-        return
-    # Search the filters
-    for item in filters:
-        action, msg, cat, mod, ln = item
-        if ((msg is None or msg.match(text)) and
-            issubclass(category, cat) and
-            (mod is None or mod.match(module)) and
-            (ln == 0 or lineno == ln)):
-            break
-    else:
-        action = defaultaction
-    # Early exit actions
-    if action == "ignore":
-        registry[key] = 1
-        return
-
-    # Prime the linecache for formatting, in case the
-    # "file" is actually in a zipfile or something.
-    linecache.getlines(filename, module_globals)
-
-    if action == "error":
-        raise message
-    # Other actions
-    if action == "once":
-        registry[key] = 1
-        oncekey = (text, category)
-        if onceregistry.get(oncekey):
-            return
-        onceregistry[oncekey] = 1
-    elif action == "always":
-        pass
-    elif action == "module":
-        registry[key] = 1
-        altkey = (text, category, 0)
-        if registry.get(altkey):
-            return
-        registry[altkey] = 1
-    elif action == "default":
-        registry[key] = 1
-    else:
-        # Unrecognized actions are errors
-        raise RuntimeError(
-              "Unrecognized action (%r) in warnings.filters:\n %s" %
-              (action, item))
-    # Warn if showwarning() does not support the 'line' argument.
-    # Don't use 'inspect' as it relies on an extension module, which breaks
-    # the build thanks to 'warnings' being imported by setup.py.
-    fxn_code = None
-    if hasattr(showwarning, 'func_code'):
-        fxn_code = showwarning.func_code
-    elif hasattr(showwarning, '__func__'):
-        fxn_code = showwarning.__func__.func_code
-    if fxn_code:
-        args = fxn_code.co_varnames[:fxn_code.co_argcount]
-        CO_VARARGS = 0x4
-        if 'line' not in args and not fxn_code.co_flags & CO_VARARGS:
-            showwarning_msg = ("functions overriding warnings.showwarning() "
-                                "must support the 'line' argument")
-            if message == showwarning_msg:
-                _show_warning(message, category, filename, lineno)
-            else:
-                warn(showwarning_msg, DeprecationWarning)
-    # Print message and context
-    showwarning(message, category, filename, lineno)
-
-
-class WarningMessage(object):
-
-    """Holds the result of a single showwarning() call."""
-
-    _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
-                        "line")
-
-    def __init__(self, message, category, filename, lineno, file=None,
-                    line=None):
-        local_values = locals()
-        for attr in self._WARNING_DETAILS:
-            setattr(self, attr, local_values[attr])
-        self._category_name = category.__name__ if category else None
-
-    def __str__(self):
-        return ("{message : %r, category : %r, filename : %r, lineno : %s, "
-                    "line : %r}" % (self.message, self._category_name,
-                                    self.filename, self.lineno, self.line))
-
-
-class catch_warnings(object):
-
-    """A context manager that copies and restores the warnings filter upon
-    exiting the context.
-
-    The 'record' argument specifies whether warnings should be captured by a
-    custom implementation of warnings.showwarning() and be appended to a list
-    returned by the context manager. Otherwise None is returned by the context
-    manager. The objects appended to the list are arguments whose attributes
-    mirror the arguments to showwarning().
-
-    The 'module' argument is to specify an alternative module to the module
-    named 'warnings' and imported under that name. This argument is only useful
-    when testing the warnings module itself.
-
-    """
-
-    def __init__(self, record=False, module=None):
-        """Specify whether to record warnings and if an alternative module
-        should be used other than sys.modules['warnings'].
-
-        For compatibility with Python 3.0, please consider all arguments to be
-        keyword-only.
-
-        """
-        self._record = record
-        self._module = sys.modules['warnings'] if module is None else module
-        self._entered = False
-
-    def __repr__(self):
-        args = []
-        if self._record:
-            args.append("record=True")
-        if self._module is not sys.modules['warnings']:
-            args.append("module=%r" % self._module)
-        name = type(self).__name__
-        return "%s(%s)" % (name, ", ".join(args))
-
-    def __enter__(self):
-        if self._entered:
-            raise RuntimeError("Cannot enter %r twice" % self)
-        self._entered = True
-        self._filters = self._module.filters
-        self._module.filters = self._filters[:]
-        self._showwarning = self._module.showwarning
-        if self._record:
-            log = []
-            def showwarning(*args, **kwargs):
-                log.append(WarningMessage(*args, **kwargs))
-            self._module.showwarning = showwarning
-            return log
-        else:
-            return None
-
-    def __exit__(self, *exc_info):
-        if not self._entered:
-            raise RuntimeError("Cannot exit %r without entering first" % self)
-        self._module.filters = self._filters
-        self._module.showwarning = self._showwarning
-
-
-# filters contains a sequence of filter 5-tuples
-# The components of the 5-tuple are:
-# - an action: error, ignore, always, default, module, or once
-# - a compiled regex that must match the warning message
-# - a class representing the warning category
-# - a compiled regex that must match the module that is being warned
-# - a line number for the line being warned, or 0 to mean any line
-# If either of the compiled regexes is None, match anything.
-_warnings_defaults = False
-try:
-    from _warnings import (filters, default_action, once_registry,
-                            warn, warn_explicit)
-    defaultaction = default_action
-    onceregistry = once_registry
-    _warnings_defaults = True
-except ImportError:
-    filters = []
-    defaultaction = "default"
-    onceregistry = {}
-
-
-# Module initialization
-_processoptions(sys.warnoptions)
-if not _warnings_defaults:
-    simplefilter("ignore", category=PendingDeprecationWarning, append=1)
-    simplefilter("ignore", category=ImportWarning, append=1)
-    bytes_warning = sys.flags.bytes_warning
-    if bytes_warning > 1:
-        bytes_action = "error"
-    elif bytes_warning:
-        bytes_action = "default"
-    else:
-        bytes_action = "ignore"
-    simplefilter(bytes_action, category=BytesWarning, append=1)
-del _warnings_defaults
diff --git a/icarus-miner/data/usr/lib/python2.6/warnings.pyc b/icarus-miner/data/usr/lib/python2.6/warnings.pyc
deleted file mode 100644
index 293107c..0000000
Binary files a/icarus-miner/data/usr/lib/python2.6/warnings.pyc and /dev/null differ
diff --git a/icarus-miner/data/www/u.log b/icarus-miner/data/www/u.log
deleted file mode 120000
index 40aea2a..0000000
--- a/icarus-miner/data/www/u.log
+++ /dev/null
@@ -1 +0,0 @@
-../root/scripts/u.log
\ No newline at end of file
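
The files deleted above were verbatim copies of the Python 2.6 standard library. A minimal smoke test follows, as a sketch only: it assumes Python 2.6 on the router and that the opkg python packages listed in the Makefile now supply urllib, urlparse, and warnings. Running it confirms the stdlib replacements behave the way the deleted code documents.

#!/usr/bin/env python
# Sketch, not part of the original patch; assumes the opkg-provided
# Python 2.6 stdlib replaces the bundled modules removed above.
from urllib import quote, unquote, urlencode
from urlparse import urljoin, urlsplit
import warnings

# quote()/unquote() must round-trip every byte value (what test1() times)
payload = ''.join(chr(i) for i in range(256))
assert unquote(quote(payload)) == payload

# urlencode() produces x-www-form-urlencoded pairs, '+' for spaces
assert urlencode({'user': 'a b'}) == 'user=a+b'

# urljoin()/urlsplit() resolve relative references per RFC 1808
assert urljoin('http://a/b/c/d', '../g') == 'http://a/b/g'
assert urlsplit('http://host:8332/path?q').port == 8332

# catch_warnings(record=True) captures warnings and restores filters on exit
with warnings.catch_warnings(record=True) as log:
    warnings.simplefilter('always')
    warnings.warn('probe')
assert len(log) == 1 and str(log[0].message) == 'probe'

print 'stdlib OK'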