Module baseStore

Source Code for Module baseStore

from c3errors import *
from configParser import C3Object
import os, md5, sha, time

try:
    # name when installed by hand
    import bsddb3 as bdb
except:
    # name that comes in python 2.3
    import bsddb as bdb

class SimpleStore(C3Object):
    """ Base Store implementation. Provides non-storage-specific functions """

    # Instantiate some type of simple record store
    currentId = -1
    idNormalizer = None

    def __init__(self, session, node, parent):
        C3Object.__init__(self, session, node, parent)

        # databasePath is some sort of on disk filename.
        # Might be a directory, but might not, so can't use defaultPath.
        # Not a path to an xml config.
        databasePath = self.get_path(session, 'databasePath')
        if (not databasePath):
            databasePath = "store_" + self.id

        if (not os.path.isabs(databasePath)):
            # Prepend defaultPath from parents
            dfp = self.get_path(session, 'defaultPath')
            if (not dfp):
                raise ConfigFileException("Store has relative path, and no visible defaultPath.")
            databasePath = os.path.join(dfp, databasePath)

        # Record fully resolved path
        self.paths['databasePath'] = databasePath

        digestPath = self.get_path(session, 'digestPath')
        if (digestPath):
            if (not os.path.isabs(digestPath)):
                # Prepend defaultPath from parents
                dfp = self.get_path(session, 'defaultPath')
                if (not dfp):
                    raise ConfigFileException("Store has relative digestPath, and no visible defaultPath.")
                digestPath = os.path.join(dfp, digestPath)
            self.paths['digestPath'] = digestPath

        self.idNormalizer = self.get_path(session, 'idNormalizer')
        self._verifyDatabases(session)

    def _verifyDatabases(self, session):
        pass

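The path handling above reduces to a simple rule: an absolute databasePath is used as given, a relative one is anchored under the nearest inherited defaultPath, and a missing one falls back to a name derived from the store's id. A minimal standalone sketch of that rule, outside the C3Object machinery (the function name resolve_database_path and its arguments are illustrative only):

    import os

    def resolve_database_path(databasePath, defaultPath, storeId):
        # Missing path: fall back to a name derived from the store's id
        if not databasePath:
            databasePath = "store_" + storeId
        # Relative path: anchor it under the inherited defaultPath
        if not os.path.isabs(databasePath):
            if not defaultPath:
                raise ValueError("Store has relative path, and no visible defaultPath.")
            databasePath = os.path.join(defaultPath, databasePath)
        return databasePath

    # e.g. resolve_database_path("recordStore.bdb", "/home/cheshire3/dbs", "recStore")
    # -> "/home/cheshire3/dbs/recordStore.bdb"
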
class BdbIter(object):
    store = None
    cursor = None
    cxn = None
    nextData = None

    def __init__(self, store):
        self.cxn = bdb.db.DB()
        self.store = store
        dbp = self.store.get_path(None, 'databasePath')
        self.cxn.open(dbp)
        self.cursor = self.cxn.cursor()
        self.nextData = self.cursor.first()

    def __iter__(self):
        return self

    def next(self):
        try:
            # Skip the store's internal metadata keys (prefixed with "__")
            while self.nextData and (self.nextData[0][:2] == "__"):
                self.nextData = self.cursor.next()
            d = self.nextData
            if not d:
                raise StopIteration()
            self.nextData = self.cursor.next()
            return d
        except:
            raise StopIteration()

    def jump(self, position):
        # Jump to this position
        self.nextData = self.cursor.set_range(position)
        return self.nextData[0]

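BdbIter simply walks the underlying Berkeley DB cursor and yields (key, data) tuples, skipping the bookkeeping keys that the store writes under a "__" prefix. A rough usage sketch, assuming a configured BdbStore instance named store is already in hand (the key "00000042" is only an example; real keys depend on the store's idNormalizer):

    # Each item is a (key, data) tuple straight from the cursor
    for (key, data) in BdbIter(store):
        print key, len(data)

    # BdbStore.__iter__ returns the same iterator, so iter(store) works too
    it = iter(store)
    it.jump("00000042")     # position the cursor at or after this key
    print it.next()         # the first (key, data) pair from that point
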
class BdbStore(SimpleStore):
    """ Berkeley DB based storage """
    cxn = None
    digestCxn = None
    digestForId = 0

    def __iter__(self):
        # Return an iterator object to iter through... keys?
        return BdbIter(self)

    def _verifyDatabases(self, session):
        self.digestForId = self.get_setting(session, 'digestForId')
        dbp = self.get_path(session, 'databasePath')
        self._verifyDatabase(dbp)
        dbp = self.get_path(session, 'digestPath')
        if (dbp):
            self._verifyDatabase(dbp)

    def _verifyDatabase(self, dbp):
        if (not os.path.exists(dbp)):
            # We don't exist, try and instantiate new database
            self._initialise(dbp)
        else:
            cxn = bdb.db.DB()
            try:
                cxn.open(dbp)
                cxn.close()
            except:
                # Still don't exist
                self._initialise(dbp)

    def _initialise(self, dbp):
        cxn = bdb.db.DB()
        cxn.set_flags(bdb.db.DB_RECNUM)
        cxn.open(dbp, dbtype=bdb.db.DB_BTREE, flags=bdb.db.DB_CREATE, mode=0660)
        cxn.close()

    def _openContainer(self, session):
        if self.cxn == None:
            cxn = bdb.db.DB()
            cxn.set_flags(bdb.db.DB_RECNUM)
            if session.environment == "apache":
                cxn.open(self.get_path(None, 'databasePath'), flags=bdb.db.DB_NOMMAP)
            else:
                dbp = self.get_path(session, 'databasePath')
                cxn.open(dbp)
            self.cxn = cxn
            dbp = self.get_path(None, 'digestPath')
            if (dbp):
                cxn = bdb.db.DB()
                if session.environment == "apache":
                    cxn.open(dbp, flags=bdb.db.DB_NOMMAP)
                else:
                    cxn.open(dbp)
                self.digestCxn = cxn
            else:
                self.digestCxn = self.cxn

    def _closeContainer(self, session):
        try:
            self.cxn.close()
            self.cxn = None
        except:
            pass
        if self.digestCxn != None:
            self.digestCxn.close()
            self.digestCxn = None

    def begin_storing(self, session):
        self._openContainer(session)
        return None

    def commit_storing(self, session):
        self._closeContainer(session)
        return None

    def generate_id(self, session):
        self._openContainer(session)
        # NB: First record is 0
        if (self.currentId == -1 or session.environment == "apache"):
            if (self.digestCxn != self.cxn or self.digestForId):
                c = self.digestCxn.cursor()
                item = c.last()
                if item:
                    id = int(item[0][6:]) + 1
                else:
                    id = 0
            else:
                c = self.cxn.cursor()
                id = '999999999999'
                if (self.idNormalizer):
                    id = self.idNormalizer.process_string(session, id)
                try:
                    try:
                        item = c.set_range(id)
                        if (item[0][0] == "_"):
                            item = c.prev()
                    except:
                        item = c.last()
                    id = int(item[0]) + 1
                except:
                    id = 0
        else:
            id = self.currentId + 1
        self.currentId = id
        return id

    def store_data(self, session, id, data, size=0):
        self._openContainer(session)

        id = str(id)
        if (self.idNormalizer != None):
            id = self.idNormalizer.process_string(session, id)

        try:
            self.cxn.put(id, data)
        except:
            data = data.encode('utf-8')
            self.cxn.put(id, data)

        if (size):
            self.cxn.put("__size_%s" % id, str(size))
        return None

    def verify_checkSum(self, session, id, data, store=1):
        # Check record doesn't already exist
        digest = self.get_setting(session, "digest")
        if (digest):
            if (digest == 'md5'):
                dmod = md5
            elif (digest == 'sha'):
                dmod = sha
            else:
                raise ConfigFileException("Unknown digest type: %s" % digest)
            m = dmod.new()

            data = data.encode('utf-8')
            m.update(data)

            self._openContainer(session)
            digest = m.hexdigest()
            cxn = self.digestCxn
            exist = cxn.get("__d2i_" + digest)
            if exist:
                raise ObjectAlreadyExistsException(exist)
            elif store:
                self.store_checkSum(session, id, digest)
            return digest

    def store_checkSum(self, session, id, digest):
        id = str(id)
        if self.idNormalizer != None:
            tid = self.idNormalizer.process_string(session, id)
            if tid:
                id = tid
        cxn = self.digestCxn
        cxn.put("__d2i_" + digest, id)
        cxn.put("__i2d_" + id, digest)

    def fetch_data(self, session, id):
        self._openContainer(session)
        sid = str(id)
        if (self.idNormalizer != None):
            sid = self.idNormalizer.process_string(session, sid)
        data = self.cxn.get(sid)
        return data

    def delete_item(self, session, id):
        self._openContainer(session)
        digest = self.fetch_checksum(session, id)
        sid = str(id)
        if (self.idNormalizer != None):
            sid = self.idNormalizer.process_string(session, str(id))
        try:
            self.cxn.delete(sid)
        except:
            # Already deleted?
            pass
        try:
            self.cxn.delete("__size_%s" % sid)
        except:
            # Size not stored
            pass
        if digest:
            cxn = self.digestCxn
            cxn.delete("__d2i_" + digest)
            cxn.delete("__i2d_" + sid)
        self._closeContainer(session)

    def fetch_size(self, session, id):
        self._openContainer(session)
        sid = str(id)
        if (self.idNormalizer != None):
            sid = self.idNormalizer.process_string(session, sid)
        rsz = self.cxn.get("__size_%s" % (sid))
        if (rsz):
            return long(rsz)
        else:
            return -1

    def fetch_checksum(self, session, id):
        self._openContainer(session)
        sid = str(id)
        if self.idNormalizer != None:
            sid = self.idNormalizer.process_string(session, sid)
        cxn = self.digestCxn
        data = cxn.get("__i2d_" + sid)
        return data

    def fetch_idList(self, session, numReq=-1, start=""):
        # return numReq ids from start
        ids = []
        self._openContainer(session)

        if numReq == -1 and not start:
            keys = self.cxn.keys()
            f = lambda k: k[0:2] != "__"
            return filter(f, keys)

        sid = str(start)
        if self.idNormalizer != None:
            sid = self.idNormalizer.process_string(session, sid)

        if (self.digestForId):
            # use digest database for ids
            c = self.digestCxn.cursor()
            spt = "__i2d_%s" % sid
            try:
                (key, data) = c.set_range(spt)
            except:
                return []
            ids.append(key[6:])
            if numReq == -1:
                tup = c.next()
                while tup:
                    if tup[0][:5] == "__i2d":
                        ids.append(tup[0][6:])
                        tup = c.next()
                    else:
                        tup = ()
            else:
                for x in range(numReq - 1):
                    tup = c.next()
                    if tup and tup[0][:5] == "__i2d":
                        ids.append(tup[0][6:])
                    else:
                        break
            return ids

        # Use normal ids
        c = self.cxn.cursor()
        if (sid == ""):
            try:
                (key, data) = c.first()
            except:
                # No data in store
                return []
        else:
            try:
                (key, data) = c.set_range(sid)
            except:
                # No data after point in store
                return []
        ids.append(key)
        if numReq == -1:
            tup = c.next()
            while tup:
                if tup[0][0] != "_":
                    ids.append(tup[0])
                tup = c.next()
        else:
            for x in range(numReq - 1):
                tup = c.next()
                if tup:
                    ids.append(tup[0])
                else:
                    break
        return ids

    def clean(self, session):
        # delete all entries
        self._openContainer(session)
        self.cxn.truncate()
        self.digestCxn.truncate()

    def get_dbsize(self, session):
        self._openContainer(session)
        # requires db created with DB_RECNUM
        nk = self.cxn.stat(bdb.db.DB_FAST_STAT)['nkeys']
        if self.digestCxn != self.cxn and self.digestCxn != None:
            # rec, size; digest elsewhere
            return nk / 2
        elif not self.get_setting(session, 'digest'):
            # rec, size; no digest
            return nk / 2
        else:
            # rec, size, digest -> rec, rec -> digest
            return nk / 4

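Alongside each record, BdbStore keeps its bookkeeping in the same (or a separate digest) Berkeley DB file under reserved key prefixes: __size_<id> for the record size, __i2d_<id> for the id-to-checksum mapping and __d2i_<digest> for the reverse lookup, which is why get_dbsize divides the raw key count by 2 or 4. A rough lifecycle sketch, assuming a configured BdbStore instance store, a session object from the surrounding framework, a store with a digest setting, and data holding the serialised record:

    store.begin_storing(session)
    id = store.generate_id(session)                     # first id is 0
    digest = store.verify_checkSum(session, id, data)   # raises ObjectAlreadyExistsException on duplicates
    store.store_data(session, id, data, size=len(data))
    store.commit_storing(session)

    print store.fetch_data(session, id)
    print store.fetch_size(session, id), store.fetch_checksum(session, id)
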
# XXX Should go to grid module

try:
    from srboo import SrbConnection, SrbException
    from baseObjects import Database
    from tarfile import *
    import cStringIO, StringIO, types, base64
    from utils import parseSrbUrl

    class SrbStore(C3Object):
        """ Storage Resource Broker based storage """

        host = ""
        port = ""
        user = ""
        passwd = ""
        dn = ""
        domain = ""
        resource = ""
        subcollection = ""

        connection = None
        checkSumHash = {}
        currentId = -1

        def __init__(self, session, config, parent):
            C3Object.__init__(self, session, config, parent)
            self.idNormalizer = self.get_path(session, 'idNormalizer')
            self.checkSumHash = {}
            self.currentId = -1
            # Now find our info
            uri = self.get_path(session, 'srbServer')
            uri = uri.encode('utf-8')
            uri = uri.strip()
            if not uri:
                raise ConfigFileException("No srbServer to connect to.")
            else:
                info = parseSrbUrl(uri)
                for (a, v) in info.items():
                    setattr(self, a, v)

            if (isinstance(parent, Database)):
                sc = parent.id + "/" + self.id
            else:
                sc = self.id
            self.subcollection = info['path'] + "/cheshire3/" + sc

            try:
                self.connection = SrbConnection(self.host, self.port, self.domain, user=self.user, passwd=self.passwd)
                self.connection.resource = self.resource
            except SrbException:
                # Couldn't connect :/
                self.connection = None
                raise
            xsc = self.subcollection + "/d2i"
            scs = xsc.split('/')
            orig = self.connection.collection
            for c in scs:
                try:
                    self.connection.create_collection(c)
                except SrbException, e:
                    # Err, at some point it should fail
                    # trying to create an existing collection...
                    pass
                self.connection.open_collection(c)
            self.connection.open_collection(orig)
            self.connection.open_collection(self.subcollection)

        def _openContainer(self, session):
            if self.connection == None:
                try:
                    self.connection = SrbConnection(self.host, self.port, self.domain, user=self.user, passwd=self.passwd, dn=self.dn)
                    self.connection.resource = self.resource
                except SrbException:
                    # Couldn't connect :/
                    raise
                self.connection.open_collection(self.subcollection)

        def _closeContainer(self, session):
            if self.connection != None:
                self.connection.disconnect()
                self.connection = None

        def begin_storing(self, session):
            self._openContainer(session)

        def commit_storing(self, session):
            self._closeContainer(session)

        def generate_id(self, session):
            # XXX Will fail for large collections.
            if self.currentId == -1:
                self._openContainer(session)
                n = self.connection.n_objects()
                if (n == 0):
                    self.currentId = 0
                else:
                    name = self.connection.object_metadata(n - 1)
                    if (name.isdigit()):
                        self.currentId = int(name) + 1
                    else:
                        raise ValueError("XXX: Can't generate new ids for non int stores")
            else:
                self.currentId = self.currentId + 1
            return self.currentId

        def store_data(self, session, id, data, size=0):
            self._openContainer(session)
            id = str(id)
            if (self.idNormalizer != None):
                id = self.idNormalizer.process_string(session, id)
            try:
                f = self.connection.create(id)
            except SrbException:
                f = self.connection.open(id, 'w')
            f.write(data)

            if (0):
                if (size):
                    f.set_umetadata('size', str(size))
                if (self.checkSumHash.has_key(id)):
                    f.set_umetadata('digest', self.checkSumHash[id])
            f.close()

            return None

        def fetch_data(self, session, id):
            self._openContainer(session)
            sid = str(id)
            if (self.idNormalizer != None):
                sid = self.idNormalizer.process_string(session, sid)
            f = self.connection.open(sid)
            data = f.read()
            f.close()
            return data

        def delete_item(self, session, id):
            self._openContainer(session)
            sid = str(id)
            if (self.idNormalizer != None):
                sid = self.idNormalizer.process_string(session, sid)
            f = self.connection.open(sid)
            digest = f.get_umetadata().get('digest', '')
            f.delete()
            f.close()

            if digest:
                self.connection.open_collection('d2i')
                f = self.connection.open(digest)
                f.delete()
                f.close()
                self.connection.up_collection()

        def fetch_idList(self, session, numReq=-1, start=""):
            self._openContainer(session)
            (scs, objs) = self.connection.walk_names()
            return objs

        def verify_checkSum(self, session, id, data, store=1):
            digest = self.get_setting(session, "digest")
            if (digest):
                if (digest == 'md5'):
                    dmod = md5
                elif (digest == 'sha'):
                    dmod = sha
                else:
                    raise ConfigFileException("Unknown digest type: %s" % digest)
                m = dmod.new()
                data = data.encode('utf-8')
                m.update(data)
                self._openContainer(session)
                digest = m.hexdigest()

                if self.connection.objects > 0:
                    self.connection.open_collection('d2i')
                    try:
                        f = self.connection.open(digest)
                        data = f.read()
                        f.close()
                        raise ObjectAlreadyExistsException(data)
                    except SrbException:
                        # Digest not stored yet
                        pass
                    self.connection.up_collection()

                if store:
                    self.store_checkSum(session, id, digest)
                return digest

        def fetch_checkSum(self, session, id):
            self._openContainer(session)
            sid = str(id)
            if (self.idNormalizer != None):
                sid = self.idNormalizer.process_string(session, sid)
            f = self.connection.open(sid)
            data = f.get_umetadata('digest')
            f.close()
            return data

        def fetch_size(self, session, id):
            self._openContainer(session)
            sid = str(id)
            if (self.idNormalizer != None):
                sid = self.idNormalizer.process_string(session, sid)
            f = self.connection.open(sid)
            data = f.get_umetadata('size')
            f.close()
            return int(data)

        def store_checkSum(self, session, id, digest):
            sid = str(id)
            if (self.idNormalizer != None):
                sid = self.idNormalizer.process_string(session, sid)
            self.checkSumHash[sid] = digest
            self.connection.open_collection('d2i')
            try:
                f = self.connection.create(digest)
                f.write(sid)
                f.close()
            except:
                pass
            self.connection.up_collection()

        def clean(self, session):
            # Remove all files from Srb
            self._openContainer(session)
            self.connection.rmrf()
            self._closeContainer(session)

    class SrbBdbCombineStore(SrbStore, BdbStore):
        """ Combined BerkeleyDB in SRB based Storage """

        # Combine up to maxRecords records into one file
        # Store metadata locally in bdb
        maxRecords = 100
        incomingRecords = []
        cachedFilename = ""
        cachedTarfile = ""

        def __init__(self, session, config, parent):
            BdbStore.__init__(self, session, config, parent)
            SrbStore.__init__(self, session, config, parent)
            self.maxRecords = int(self.get_setting(session, "recordsPerFile"))
            self.digestForId = self.get_setting(session, "digestForId")
            self.useBase64 = self.get_setting(session, "base64")
            self.incomingRecords = []

        def _verifyDatabases(self, session):
            BdbStore._verifyDatabases(self, session)

        def begin_storing(self, session):
            BdbStore._openContainer(self, session)
            SrbStore._openContainer(self, session)

        def commit_storing(self, session):
            self._writeCache(session)
            BdbStore._closeContainer(self, session)
            SrbStore._closeContainer(self, session)

        def generate_id(self, session):
            return BdbStore.generate_id(self, session)

        def verify_checkSum(self, session, id, data, store=1):
            return BdbStore.verify_checkSum(self, session, id, data, store)

        def fetch_checkSum(self, session, id):
            return BdbStore.fetch_checksum(self, session, id)

        def fetch_size(self, session, id):
            return BdbStore.fetch_size(self, session, id)

        def store_checkSum(self, session, id, digest):
            return BdbStore.store_checkSum(self, session, id, digest)

        def delete_item(self, session, id):
            SrbStore.delete_item(self, session, id)
            BdbStore.delete_item(self, session, id)

        def fetch_idList(self, session, numReq=-1, start=""):
            return BdbStore.fetch_idList(self, session, numReq, start)

        def clean(self, session):
            SrbStore.clean(self, session)
            # XXX: And truncate local metadata store?
            # BdbStore.clean(self, session)

        def fetch_data(self, session, id):
            # Extract from chunk.
            # Cache most recent chunk as likely to be pulled back in order.
            SrbStore._openContainer(self, session)
            sid = str(id)
            startid = id / self.maxRecords * self.maxRecords
            startsid = str(startid)
            end = startid + self.maxRecords - 1
            endsid = str(end)
            if (self.idNormalizer != None):
                sid = self.idNormalizer.process_string(session, sid)
                endsid = self.idNormalizer.process_string(session, endsid)
                startsid = self.idNormalizer.process_string(session, startsid)
            filename = "%s-%s.tar" % (startsid, endsid)

            if (self.cachedFilename != filename):
                f = self.connection.open(filename)
                data = f.read()
                f.close()
                if self.useBase64:
                    data = base64.b64decode(data)
                self.cachedTarfile = data
                self.cachedFilename = filename
            else:
                data = self.cachedTarfile

            # Extract from tar
            buffer = StringIO.StringIO(data)
            tar = TarFile.open(mode="r|", fileobj=buffer)
            # This is very odd, but appears to be necessary
            f = None
            for ti in tar:
                if ti.name == sid:
                    f = tar.extractfile(ti)
                    break
            if f != None:
                recdata = f.read()
                f.close()
            else:
                # Can't find record in tar file?!
                raise ValueError("Can't find record?")
            tar.close()
            buffer.close()
            return recdata

        def store_data(self, session, id, data, size=0):
            # Cache until maxRecords items, then push
            if (len(self.incomingRecords) == 1 and id != self.incomingRecords[-1][0] + 1):
                # Write single
                self._writeSingle(session, self.incomingRecords[0])
                self.incomingRecords = [(id, data)]
            elif len(self.incomingRecords) < self.maxRecords:
                self.incomingRecords.append((id, data))
                # XXX Store size/digest?
            else:
                # Write cache as TarFile
                self._writeCache(session)
                self.incomingRecords = [(id, data)]

        def _writeCache(self, session):
            # Called from commit and store_data

            if (len(self.incomingRecords) == 1):
                return self._writeSingle(session, self.incomingRecords[0])

            tarbuffer = StringIO.StringIO("")
            tar = TarFile.open(mode="w|", fileobj=tarbuffer)
            for (id, data) in self.incomingRecords:
                sid = str(id)
                if (self.idNormalizer != None):
                    sid = self.idNormalizer.process_string(session, sid)
                ti = TarInfo(sid)
                if type(data) == types.UnicodeType:
                    data = data.encode('utf-8')
                ti.size = len(data)
                buff = cStringIO.StringIO(data)
                buff.seek(0)
                tar.addfile(ti, buff)
                buff.close()
            tar.close()

            tarbuffer.seek(0)
            data = tarbuffer.read()
            tarbuffer.close()

            if self.useBase64:
                data = base64.b64encode(data)

            # Now store tar in SRB
            startsid = str(self.incomingRecords[0][0])
            endsid = str(self.incomingRecords[0][0] + self.maxRecords - 1)

            if (self.idNormalizer != None):
                startsid = self.idNormalizer.process_string(session, startsid)
                endsid = self.idNormalizer.process_string(session, endsid)

            name = "%s-%s.tar" % (startsid, endsid)
            self.incomingRecords = []

            SrbStore._openContainer(self, session)
            try:
                f = self.connection.create(name)
            except SrbException:
                f = self.connection.open(name, 'w')
            f.write(data)
            f.close()

        def _writeSingle(self, session, info):
            # Writing a single record into a tar file.
            # May or may not exist already, but assume it does.

            (id, recdata) = info
            sid = str(id)
            startid = id / self.maxRecords * self.maxRecords
            startsid = str(startid)
            end = startid + self.maxRecords - 1
            endsid = str(end)
            if (self.idNormalizer != None):
                sid = self.idNormalizer.process_string(session, sid)
                endsid = self.idNormalizer.process_string(session, endsid)
                startsid = self.idNormalizer.process_string(session, startsid)
            filename = "%s-%s.tar" % (startsid, endsid)

            SrbStore._openContainer(self, session)
            try:
                f = self.connection.open(filename, 'w')
            except SrbException:
                f = self.connection.create(filename)
            data = f.read()
            f.seek(0)

            # Put file into tar

            tarbuffer = StringIO.StringIO(data)
            tar = TarFile.open(mode="w|", fileobj=tarbuffer)
            ti = TarInfo(sid)
            ti.size = len(recdata)
            buff = cStringIO.StringIO(recdata)
            buff.seek(0)
            tar.addfile(ti, buff)
            buff.close()
            tar.close()
            tarbuffer.seek(0)
            newdata = tarbuffer.read()
            tarbuffer.close()

            # XXX Will this work for inserting smaller records?
            # Or will we end up with junk at end of file?
            f.write(newdata)
            f.close()

except:
    pass

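SrbBdbCombineStore cuts down on SRB round trips by packing runs of consecutive records into a single tar archive named after the id range it covers (startId through startId + maxRecords - 1), optionally base64 encoded before upload, while sizes and checksums stay in the local Berkeley DB. A simplified standalone sketch of the packing step in _writeCache, using only the standard library; the pack_records helper and the sample records are illustrative, and the real code derives the archive name from the store's normalized id range rather than from the records themselves:

    import StringIO, cStringIO, base64
    from tarfile import TarFile, TarInfo

    def pack_records(records, use_base64=False):
        # records: list of (id, data) pairs; returns (archiveName, archiveBytes)
        tarbuffer = StringIO.StringIO()
        tar = TarFile.open(mode="w|", fileobj=tarbuffer)
        for (id, data) in records:
            ti = TarInfo(str(id))
            ti.size = len(data)
            tar.addfile(ti, cStringIO.StringIO(data))
        tar.close()
        data = tarbuffer.getvalue()
        if use_base64:
            data = base64.b64encode(data)
        name = "%s-%s.tar" % (records[0][0], records[-1][0])
        return (name, data)

    # e.g. pack_records([(0, "<record>a</record>"), (1, "<record>b</record>")])
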