
Source Code for Module backend.server.importlib.backend

#
# Copyright (c) 2008--2017 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# Generic DB backend
#
import copy
import string
import sys

from spacewalk.common.usix import raise_with_tb
from spacewalk.common import rhn_rpm
from spacewalk.common.rhnConfig import CFG
from spacewalk.common.rhnException import rhnFault
from spacewalk.server import rhnSQL, rhnChannel, taskomatic
from importLib import Diff, Package, IncompletePackage, Erratum, \
    AlreadyUploadedError, InvalidPackageError, TransactionError, \
    InvalidSeverityError, SourcePackage
from backendLib import TableCollection, sanitizeValue, TableDelete, \
    TableUpdate, TableLookup, addHash, TableInsert
sequences = {
    'rhnPackageCapability': 'rhn_pkg_capability_id_seq',
    'rhnPackage': 'rhn_package_id_seq',
    'rhnSourceRPM': 'rhn_sourcerpm_id_seq',
    'rhnPackageGroup': 'rhn_package_group_id_seq',
    'rhnErrata': 'rhn_errata_id_seq',
    'rhnChannel': 'rhn_channel_id_seq',
    'rhnChannelProduct': 'rhn_channelprod_id_seq',
    'rhnPackageSource': 'rhn_package_source_id_seq',
    'rhnChannelFamily': 'rhn_channel_family_id_seq',
    'rhnCVE': 'rhn_cve_id_seq',
    'rhnChannelArch': 'rhn_channel_arch_id_seq',
    'rhnPackageArch': 'rhn_package_arch_id_seq',
    'rhnServerArch': 'rhn_server_arch_id_seq',
    'rhnCPUArch': 'rhn_cpu_arch_id_seq',
    'rhnErrataFile': 'rhn_erratafile_id_seq',
    'rhnKickstartableTree': 'rhn_kstree_id_seq',
    'rhnArchType': 'rhn_archtype_id_seq',
    'rhnPackageChangeLogRec': 'rhn_pkg_cl_id_seq',
    'rhnPackageChangeLogData': 'rhn_pkg_cld_id_seq',
    'rhnContentSource': 'rhn_chan_content_src_id_seq',
}
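# Usage sketch (added for illustration; not in the original module): init()
# below turns each table-name/sequence-name pair into a live Sequence object,
# so callers can allocate ids per table, e.g.:
#
#   backend = OracleBackend(rhnSQL).init()  # OracleBackend: the subclass the
#                                           # class comment below mentions
#   new_id = backend.sequences['rhnPackage'].next()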
class Backend:
    # This object is initialized by the specific subclasses (e.g.
    # OracleBackend)
    tables = TableCollection()

    # TODO: Some reason why we're passing a module in here? Seems to
    # always be rhnSQL anyhow...
    def __init__(self, dbmodule):
        self.dbmodule = dbmodule
        self.sequences = {}

    # TODO: Why is there a pseudo-constructor here instead of just using
    # __init__?
    def init(self):
        # Initializes the database connection objects
        # This function has to be called on a newly defined Backend object
        # Initialize sequences
        for k, v in sequences.items():
            self.sequences[k] = self.dbmodule.Sequence(v)
        # TODO: Why do we return a reference to ourselves? If somebody called
        # this method they already have a reference...
        return self

    def setDateFormat(self, format):
        sth = self.dbmodule.prepare("alter session set nls_date_format ='%s'"
                                    % format)
        sth.execute()
        sth = self.dbmodule.prepare("alter session set nls_timestamp_format ='%s'"
                                    % format)
        sth.execute()

    # Note: postgres-specific implementation overrides this in PostgresBackend
    def processCapabilities(self, capabilityHash):
        h = self.dbmodule.prepare("select lookup_package_capability(:name, :version) as id from dual")
        for name, version in capabilityHash.keys():
            ver = version
            if version is None or version == '':
                ver = None
            h.execute(name=name, version=ver)
            row = h.fetchone_dict()
            capabilityHash[(name, version)] = row['id']
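    # Usage sketch (added for illustration; not in the original source): the
    # caller seeds the hash with (name, version) keys mapped to None, and this
    # method fills in the capability ids, e.g.:
    #
    #   caps = {('bash', None): None, ('webserver', '1.0'): None}
    #   backend.processCapabilities(caps)
    #   # caps now maps each (name, version) tuple to a numeric capability id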
    def processChangeLog(self, changelogHash):
        if CFG.has_key('package_import_skip_changelog') and CFG.package_import_skip_changelog:
            return
        sql = "select id from rhnPackageChangeLogData where name = :name and time = :time and text = :text"
        h = self.dbmodule.prepare(sql)
        toinsert = [[], [], [], []]
        for name, time, text in changelogHash.keys():
            val = {}
            _buildExternalValue(val, {'name': name, 'time': time, 'text': text},
                                self.tables['rhnPackageChangeLogData'])
            h.execute(name=val['name'], time=val['time'], text=val['text'])
            row = h.fetchone_dict()
            if row:
                changelogHash[(name, time, text)] = row['id']
                continue

            id = self.sequences['rhnPackageChangeLogData'].next()
            changelogHash[(name, time, text)] = id

            toinsert[0].append(id)
            toinsert[1].append(val['name'])
            toinsert[2].append(val['time'])
            toinsert[3].append(val['text'])

        if not toinsert[0]:
            # Nothing to do
            return

        sql = "insert into rhnPackageChangeLogData (id, name, time, text) values (:id, :name, :time, :text)"
        h = self.dbmodule.prepare(sql)
        h.executemany(id=toinsert[0], name=toinsert[1], time=toinsert[2], text=toinsert[3])

    def processCVEs(self, cveHash):
        # First figure out which CVEs are already inserted
        sql = "select id from rhnCVE where name = :name"
        h = self.dbmodule.prepare(sql)
        toinsert = [[], []]

        for cve_name in cveHash.keys():
            h.execute(name=cve_name)
            row = h.fetchone_dict()

            if row:
                cveHash[cve_name] = row['id']
                continue

            # Generate an id
            id = self.sequences['rhnCVE'].next()

            cveHash[cve_name] = id

            toinsert[0].append(id)
            toinsert[1].append(cve_name)

        if not toinsert[0]:
            # Nothing to do
            return

        sql = "insert into rhnCVE (id, name) values (:id, :name)"
        h = self.dbmodule.prepare(sql)
        h.executemany(id=toinsert[0], name=toinsert[1])

    def lookupErrataFileTypes(self, hash):
        hash.clear()
        h = self.dbmodule.prepare("select id, label from rhnErrataFileType")
        h.execute()
        while 1:
            row = h.fetchone_dict()
            if not row:
                break
            hash[row['label']] = row['id']
        return hash

    def __lookupArches(self, archHash, table):
        if not archHash:
            return

        sql = "select id from %s where label = :name" % table
        h = self.dbmodule.prepare(sql)
        for k in archHash.keys():
            h.execute(name=str(k))
            row = h.fetchone_dict()
            if row:
                archHash[k] = row['id']
            # Else, it's an unsupported architecture

    def lookupChannelArches(self, archHash):
        return self.__lookupArches(archHash, 'rhnChannelArch')

    def lookupPackageArches(self, archHash):
        return self.__lookupArches(archHash, 'rhnPackageArch')

    def lookupServerArches(self, archHash):
        return self.__lookupArches(archHash, 'rhnServerArch')

    def lookupArchTypes(self, arch_types_hash):
        h = self.dbmodule.prepare(
            "select id, name from rhnArchType where label = :label")
        seq = self.sequences['rhnArchType']
        updates = [[], []]
        inserts = [[], [], []]
        results = {}
        for label, name in arch_types_hash.items():
            h.execute(label=label)
            row = h.fetchone_dict()
            if not row:
                next_id = seq.next()
                inserts[0].append(next_id)
                inserts[1].append(label)
                inserts[2].append(name)
                results[label] = next_id
                continue
            aid = row['id']
            results[label] = aid
            if name == row['name']:
                # Nothing to do
                continue
            updates[0].append(aid)
            updates[1].append(name)
        if inserts[0]:
            h = self.dbmodule.prepare("""
                insert into rhnArchType (id, label, name)
                values (:id, :label, :name)
            """)
            h.executemany(id=inserts[0], label=inserts[1], name=inserts[2])
        if updates[0]:
            h = self.dbmodule.prepare("""
                update rhnArchType
                   set name = :name
                 where id = :id
            """)
            h.executemany(id=updates[0], name=updates[1])

        # Finally, update the hash
        arch_types_hash.update(results)

    def _lookupOrg(self):
        # Returns the org id
        sql = "select min(id) as id from web_customer"
        h = self.dbmodule.prepare(sql)
        h.execute()
        rows = h.fetchall_dict()
        if not rows:
            raise ValueError("No user is created")
        return rows[0]['id']

    def lookupOrg(self, org_name=None):
        if not org_name:
            return self._lookupOrg()
        # Returns id of the org if found, None otherwise
        sql = "select id from web_customer where name = :name"
        h = self.dbmodule.prepare(sql)
        h.execute(name=org_name)
        row = h.fetchone_dict()
        if not row:
            return None
        return row['id']

    def lookupMaster(self, master_label):
        # Returns the master record (if it exists)
        sql = "select * from rhnISSMaster where label = :label"
        h = self.dbmodule.prepare(sql)
        h.execute(label=master_label)
        return h.fetchone_dict()

    def createMaster(self, master_label):
        # Creates a master record with label master_label
        sql = """
            insert into rhnISSMaster (id, label)
            values (sequence_nextval('rhn_issmaster_seq'), :label)
        """
        h = self.dbmodule.prepare(sql)
        h.execute(label=master_label)

    def createMasterOrgs(self, master, orgs):
        # Create master org records
        insert = [[], [], []]
        for org in orgs:
            insert[0].append(master)
            insert[1].append(org['id'])
            insert[2].append(org['name'])
        sql = """
            insert into rhnISSMasterOrgs
                   (id, master_id, master_org_id, master_org_name)
            values (sequence_nextval('rhn_issmasterorgs_seq'),
                   (select id from rhnISSMaster where label = :label),
                   :id, :name)
        """
        h = self.dbmodule.prepare(sql)
        h.executemany(label=insert[0], id=insert[1], name=insert[2])

    def createOrgs(self, orgs):
        # Create local org records
        sql = """
            insert into web_customer (id, name)
            values (sequence_nextval('web_customer_id_seq'), :name)
        """
        h = self.dbmodule.prepare(sql)
        h.executemany(name=orgs)
        sql = "select id, name from web_customer"
        h = self.dbmodule.prepare(sql)
        h.execute()
        rows = h.fetchall_dict()
        ret = {}
        for row in rows:
            ret[row['name']] = row['id']
        return ret

    def updateMasterOrgs(self, master_orgs):
        # Update the master org to local org mapping
        insert = [[], []]
        for org in master_orgs:
            insert[0].append(org['master_id'])
            insert[1].append(org['local_id'])
        sql = """
            update rhnISSMasterOrgs
               set local_org_id=:local
             where master_org_id=:master
        """
        h = self.dbmodule.prepare(sql)
        h.executemany(master=insert[0], local=insert[1])

    def lookupOrgTrusts(self):
        # Return a hash of org trusts
        sql = "select org_id, org_trust_id from rhnTrustedOrgs"
        h = self.dbmodule.prepare(sql)
        h.execute()
        rows = h.fetchall_dict()
        ret = {}
        if rows:
            for row in rows:
                if row['org_id'] not in list(ret.keys()):
                    ret[row['org_id']] = []
                ret[row['org_id']].append(row['org_trust_id'])
        return ret

    def clearOrgTrusts(self, org_id):
        # Delete all trusts involving this org; trusts are always
        # bi-directional
        sql = """
            delete from rhnTrustedOrgs
             where org_id = :org_id
                or org_trust_id = :org_id
        """
        h = self.dbmodule.prepare(sql)
        h.execute(org_id=org_id)

    def createOrgTrusts(self, trusts):
        # Create org trusts
        insert = [[], []]
        for trust in trusts:
            insert[0].append(trust['org_id'])
            insert[1].append(trust['trust'])
        sql = """
            insert into rhnTrustedOrgs (org_id, org_trust_id)
            values (:id, :trust)
        """
        h = self.dbmodule.prepare(sql)
        h.executemany(id=insert[0], trust=insert[1])

    def lookupOrgMap(self, master_label):
        sql = """
            select imo.master_org_id, imo.master_org_name, imo.local_org_id
              from rhnISSMasterOrgs imo,
                   rhnISSMaster im
             where im.id = imo.master_id
               and im.label = :master_label
        """
        h = self.dbmodule.prepare(sql)
        h.execute(master_label=master_label)
        rows = h.fetchall_dict()
        maps = {'master-name-to-master-id': {},
                'master-id-to-local-id': {}}
        if not rows:
            return maps
        mn_to_mi = {}  # master org name to master org id map
        mi_to_li = {}  # master org id to local org id map
        for org in rows:
            if ('master_org_id' in list(org.keys())
                    and 'master_org_name' in list(org.keys())
                    and org['master_org_id']
                    and org['master_org_name']):
                mn_to_mi[org['master_org_name']] = org['master_org_id']
            if ('master_org_id' in list(org.keys())
                    and 'local_org_id' in list(org.keys())
                    and org['master_org_id']
                    and org['local_org_id']):
                mi_to_li[org['master_org_id']] = org['local_org_id']
        maps['master-name-to-master-id'] = mn_to_mi
        maps['master-id-to-local-id'] = mi_to_li
        return maps
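    # Return-shape example (added; the label and values are illustrative):
    #
    #   maps = backend.lookupOrgMap('master.example.com')
    #   maps['master-name-to-master-id']   # e.g. {'Example Org': 1}
    #   maps['master-id-to-local-id']      # e.g. {1: 42}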
    def lookupChannels(self, hash):
        if not hash:
            return
        sql = "select id, channel_arch_id from rhnChannel where label = :label"
        h = self.dbmodule.prepare(sql)
        for k in hash.keys():
            h.execute(label=k)
            row = h.fetchone_dict()
            if row:
                hash[k] = row
            # Else, it's an unsupported channel

    def lookupChannelPackageArchCompat(self, channelArchHash):
        # Return all the arches compatible with each key of archHash
        sql = """
            select package_arch_id
              from rhnChannelPackageArchCompat
             where channel_arch_id = :channel_arch_id
        """
        h = self.dbmodule.prepare(sql)
        for channel_arch_id in channelArchHash.keys():
            dict = {}
            h.execute(channel_arch_id=channel_arch_id)
            while 1:
                row = h.fetchone_dict()
                if not row:
                    break
                dict[row['package_arch_id']] = None
            channelArchHash[channel_arch_id] = dict

    def lookupServerGroupTypes(self, entries_hash):
        sql = """
            select id
              from rhnServerGroupType
             where label = :label
        """
        h = self.dbmodule.prepare(sql)
        for sgt in entries_hash.keys():
            h.execute(label=sgt)
            row = h.fetchone_dict()
            if not row:
                # server group not found
                continue
            entries_hash[sgt] = row['id']

    def lookupPackageNames(self, nameHash):
        if not nameHash:
            return
        sql = "select LOOKUP_PACKAGE_NAME(:name) id from dual"
        h = self.dbmodule.prepare(sql)
        for k in nameHash.keys():
            h.execute(name=k)
            nameHash[k] = h.fetchone_dict()['id']

    def lookupErratum(self, erratum):
        if not erratum:
            return None

        sql = """
            select advisory
              from rhnErrata
             where advisory_name = :advisory_name
        """
        h = self.dbmodule.prepare(sql)
        h.execute(advisory_name=erratum['advisory_name'])
        return h.fetchone_dict()
    def lookupErrataSeverityId(self, erratum):
        """
        For the given severity type, returns the id
        associated with it in the rhnErrataSeverity table.
        """
        if not erratum:
            return None

        sql = """
            select id
              from rhnErrataSeverity
             where label = :severity
        """

        h = self.dbmodule.prepare(sql)

        if erratum['security_impact'] == '':
            return None

        # concatenate the severity to reflect the db
        # bz-204374: rhnErrataSeverity tbl has lower case severity values,
        # so we convert severity in errata hash to lower case to lookup.
        severity_label = 'errata.sev.label.' + erratum['security_impact'].lower()

        h.execute(severity=severity_label)
        row = h.fetchone_dict()

        if not row:
            raise InvalidSeverityError("Invalid severity: %s" % erratum['security_impact'])

        return row['id']
    def lookupEVRs(self, evrHash):
        sql = "select LOOKUP_EVR(:epoch, :version, :release) id from dual"
        h = self.dbmodule.prepare(sql)
        for evr in evrHash.keys():
            epoch, version, release = evr
            if epoch == '' or epoch is None:
                epoch = None
            else:
                epoch = str(epoch)
            h.execute(epoch=epoch, version=version, release=release)
            row = h.fetchone_dict()
            if row:
                evrHash[evr] = row['id']
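    # Key-shape sketch (added for illustration): keys are (epoch, version,
    # release) tuples; an empty or missing epoch is normalized to NULL before
    # the LOOKUP_EVR call, e.g.:
    #
    #   evrs = {('0', '1.0', '1.el7'): None, ('', '2.3', '4'): None}
    #   backend.lookupEVRs(evrs)
    #   # each tuple now maps to the id returned by LOOKUP_EVR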
    # Note: postgres-specific implementation overrides this in PostgresBackend
    def lookupChecksums(self, checksumHash):
        if not checksumHash:
            return
        sql = "select lookup_checksum(:ctype, :csum) id from dual"
        h = self.dbmodule.prepare(sql)
        for k in checksumHash.keys():
            ctype, csum = k
            if csum != '':
                h.execute(ctype=ctype, csum=csum)
                row = h.fetchone_dict()
                if row:
                    checksumHash[k] = row['id']

    def lookupChecksumTypes(self, checksumTypeHash):
        if not checksumTypeHash:
            return
        sql = "select id from rhnChecksumType where label = :label"
        h = self.dbmodule.prepare(sql)
        for l in checksumTypeHash.keys():
            h.execute(label=l)
            row = h.fetchone_dict()
            if row:
                checksumTypeHash[l] = row['id']

    def lookupPackageNEVRAs(self, nevraHash):
        sql = "select LOOKUP_PACKAGE_NEVRA(:name, :evr, :arch) id from dual"
        h = self.dbmodule.prepare(sql)
        for nevra in nevraHash:
            name, evr, arch = nevra
            if arch is None:
                arch = ''
            h.execute(name=name, evr=evr, arch=arch)
            row = h.fetchone_dict()
            if row:
                nevraHash[nevra] = row['id']

    def lookupPackagesByNEVRA(self, nevraHash):
        sql = """
            select id from rhnPackage
             where name_id = :name and
                   evr_id = :evr and
                   package_arch_id = :arch
        """
        h = self.dbmodule.prepare(sql)

        for nevra in nevraHash:
            name, evr, arch = nevra
            h.execute(name=name, evr=evr, arch=arch)
            row = h.fetchone_dict()
            if row:
                nevraHash[nevra] = row['id']

    def lookupPackageKeyId(self, header):
        lookup_keyid_sql = rhnSQL.prepare("""
            select pk.id
              from rhnPackagekey pk,
                   rhnPackageKeyType pkt,
                   rhnPackageProvider pp
             where pk.key_id = :key_id
               and pk.key_type_id = pkt.id
               and pk.provider_id = pp.id
        """)
        sigkeys = rhn_rpm.RPM_Header(header).signatures
        key_id = None  # _key_ids(sigkeys)[0]
        for sig in sigkeys:
            if sig['signature_type'] == 'gpg':
                key_id = sig['key_id']

        lookup_keyid_sql.execute(key_id=key_id)
        keyid = lookup_keyid_sql.fetchall_dict()

        return keyid[0]['id']

    def lookupSourceRPMs(self, hash):
        self.__processHash('lookup_source_name', hash)

    def lookupPackageGroups(self, hash):
        self.__processHash('lookup_package_group', hash)

    def lookupPackages(self, packages, checksums, ignore_missing=0):
        # If nevra is enabled use checksum as primary key
        self.validate_pks()
        for package in packages:
            if not isinstance(package, IncompletePackage):
                raise TypeError("Expected an IncompletePackage instance, found %s" %
                                str(type(package)))
        for package in packages:
            # here we need to figure out which checksum we have in the database
            not_found = None
            for type, chksum in package['checksums'].items():
                package['checksum_type'] = type
                package['checksum'] = chksum
                package['checksum_id'] = checksums[(type, chksum)]
                try:
                    self.__lookupObjectCollection([package], 'rhnPackage')
                    not_found = None
                    break
                except InvalidPackageError:
                    e = sys.exc_info()[1]
                    not_found = (e, sys.exc_info()[2])
            if not_found and not ignore_missing:
                # package is not in database at all
                raise_with_tb(not_found[0], not_found[1])

    def lookupChannelFamilies(self, hash):
        if not hash:
            return
        sql = "select id from rhnChannelFamily where label = :label"
        h = self.dbmodule.prepare(sql)
        for k in hash.keys():
            h.execute(label=k)
            row = h.fetchone_dict()
            if row:
                hash[k] = row['id']
            # Else, it's an unsupported channel

    def lookup_kstree_types(self, hash):
        return self._lookup_in_table('rhnKSTreeType', 'rhn_kstree_type_seq',
                                     hash)

    def lookup_ks_install_types(self, hash):
        return self._lookup_in_table('rhnKSInstallType',
                                     'rhn_ksinstalltype_id_seq', hash)

    def _lookup_in_table(self, table_name, sequence_name, hash):
        t = self.dbmodule.Table(table_name, 'label')
        seq = self.dbmodule.Sequence(sequence_name)
        to_insert = []
        to_update = []
        result = {}
        for label, name in hash.items():
            row = t[label]
            if not row:
                row_id = seq.next()
                result[label] = row_id
                to_insert.append((label, name, row_id))
                continue
            row_id = row['id']
            result[label] = row_id
            if row['name'] != name:
                to_update.append((label, name))
                continue
            # Entry found in the table - nothing more to do

        if to_insert:
            # Have to insert rows
            row_ids = []
            labels = []
            names = []
            for label, name, row_id in to_insert:
                row_ids.append(row_id)
                labels.append(label)
                names.append(name)

            sql = """
                insert into %s (id, label, name) values (:id, :label, :name)
            """
            h = self.dbmodule.prepare(sql % table_name)
            h.executemany(id=row_ids, label=labels, name=names)

        if to_update:
            labels = []
            names = []
            for label, name in to_update:
                labels.append(label)
                names.append(name)

            sql = """
                update %s set name = :name where label = :label
            """
            h = self.dbmodule.prepare(sql % table_name)
            h.executemany(label=labels, name=names)

        # Update the returning value
        hash.clear()
        hash.update(result)
        return hash
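    # Usage sketch (added; the label/name pair is made up): the two wrappers
    # above pass a label->name hash, which is updated in place so that each
    # key maps to its row id afterwards:
    #
    #   types = {'rpm': 'RPM Kickstart Tree'}
    #   backend.lookup_kstree_types(types)
    #   # types['rpm'] is now the rhnKSTreeType id (inserted if it was missing)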
    def processChannelArches(self, arches):
        self.__processObjectCollection(arches, 'rhnChannelArch',
                                       uploadForce=4, ignoreUploaded=1, severityLimit=4)

    def processPackageArches(self, arches):
        self.__processObjectCollection(arches, 'rhnPackageArch',
                                       uploadForce=4, ignoreUploaded=1, severityLimit=4)

    def processServerArches(self, arches):
        self.__processObjectCollection(arches, 'rhnServerArch',
                                       uploadForce=4, ignoreUploaded=1, severityLimit=4)

    def processCPUArches(self, arches):
        self.__processObjectCollection(arches, 'rhnCPUArch',
                                       uploadForce=4, ignoreUploaded=1, severityLimit=4)

    def processMasterOrgs(self, orgs):
        self.__processObjectCollection(orgs, 'rhnISSMasterOrgs',
                                       uploadForce=4, ignoreUploaded=1, severityLimit=4)

    def processOrgs(self, orgs):
        self.__processObjectCollection(orgs, 'web_customer',
                                       uploadForce=4, ignoreUploaded=1, severityLimit=4)

    def processServerPackageArchCompatMap(self, entries):
        self.__populateTable('rhnServerPackageArchCompat', entries,
                             delete_extra=1)

    def processServerChannelArchCompatMap(self, entries):
        self.__populateTable('rhnServerChannelArchCompat', entries,
                             delete_extra=1)

    def processChannelPackageArchCompatMap(self, entries):
        self.__populateTable('rhnChannelPackageArchCompat', entries,
                             delete_extra=1)

    def processServerGroupServerArchCompatMap(self, entries):
        self.__populateTable('rhnServerServerGroupArchCompat', entries,
                             delete_extra=1)

    def processPackages(self, packages, uploadForce=0, ignoreUploaded=0,
                        forceVerify=0, transactional=0):
        # Insert/update the packages
        self.validate_pks()

        childTables = {
            'rhnPackageProvides': 'package_id',
            'rhnPackageRequires': 'package_id',
            'rhnPackageConflicts': 'package_id',
            'rhnPackageObsoletes': 'package_id',
            'rhnPackageRecommends': 'package_id',
            'rhnPackageSuggests': 'package_id',
            'rhnPackageSupplements': 'package_id',
            'rhnPackageEnhances': 'package_id',
            'rhnPackageBreaks': 'package_id',
            'rhnPackagePredepends': 'package_id',
            'rhnPackageFile': 'package_id',
            'rhnPackageChangeLogRec': 'package_id',
        }

        if CFG.has_key('package_import_skip_changelog') and CFG.package_import_skip_changelog:
            del childTables['rhnPackageChangeLogRec']

        for package in packages:
            if not isinstance(package, Package):
                raise TypeError("Expected a Package instance")

            tableList = copy.deepcopy(childTables)

            # older sat packages won't have these fields;
            # avoid Null insertions
            if package['header_start'] is None:
                package['header_start'] = -1
                package['header_end'] = -1

            self.__processObjectCollection__([package, ], 'rhnPackage', tableList,
                                             uploadForce=uploadForce, forceVerify=forceVerify,
                                             ignoreUploaded=ignoreUploaded, severityLimit=1,
                                             transactional=transactional)

    def processErrata(self, errata):
        # Insert/update the errata

        childTables = [
            'rhnChannelErrata',
            'rhnErrataBugList',
            'rhnErrataFile',
            'rhnErrataKeyword',
            'rhnErrataPackage',
            'rhnErrataCVE',
        ]

        for erratum in errata:
            if not isinstance(erratum, Erratum):
                raise TypeError("Expected an Erratum instance")

        return self.__processObjectCollection(errata, 'rhnErrata', childTables,
                                              'errata_id', uploadForce=4, ignoreUploaded=1, forceVerify=1,
                                              transactional=1)
    def update_channels_affected_by_errata(self, dml):

        # identify errata that were affected
        affected_errata_ids = {}
        for op_type in ['insert', 'update', 'delete']:
            op_values = getattr(dml, op_type)
            for table_name, values_hash in op_values.items():
                if table_name == 'rhnErrata':
                    field = 'id'
                elif 'errata_id' in values_hash:
                    field = 'errata_id'

                # Now we know in which field to look for changes
                for erratum_id in values_hash[field]:
                    affected_errata_ids[erratum_id] = None

        # Get affected channels
        affected_channel_ids = {}
        h = self.dbmodule.prepare("""
            select channel_id
              from rhnChannelErrata
             where errata_id = :errata_id
        """)
        for errata_id in affected_errata_ids.keys():
            h.execute(errata_id=errata_id)

            channel_ids = h.fetchall_dict() or []
            channel_ids = [x['channel_id'] for x in channel_ids]
            for channel_id in channel_ids:
                affected_channel_ids[channel_id] = errata_id

        # Now update the channels
        update_channel = self.dbmodule.Procedure('rhn_channel.update_channel')
        invalidate_ss = 0

        for channel_id in affected_channel_ids.keys():
            update_channel(channel_id, invalidate_ss)
            h = self.dbmodule.prepare("""
                select advisory from rhnErrata where id = :errata_id
            """)
            h.execute(errata_id=affected_channel_ids[channel_id])
            advisory = h.fetchone()[0]

            channel = rhnChannel.Channel()
            channel.load_by_id(channel_id)
            taskomatic.add_to_repodata_queue(channel.get_label(), "errata",
                                             advisory)
    def processKickstartTrees(self, ks_trees):
        childTables = [
            'rhnKSTreeFile',
            # 'rhnKSTreeType',
            # 'rhnKSInstallType',
        ]
        self.__processObjectCollection(ks_trees, 'rhnKickstartableTree',
                                       childTables, 'kstree_id', uploadForce=4, forceVerify=1,
                                       ignoreUploaded=1, severityLimit=1, transactional=1)
    def queue_errata(self, errata, timeout=0):
        # timeout is the number of seconds we want the execution to be delayed
        if not errata:
            return
        # Figure out the errata ids
        errata_channel_ids = []
        for erratum in errata:
            if erratum.ignored:
                # Skip it
                continue
            if erratum.diff_result is not None:
                if erratum.diff_result.level != 0:
                    # New or modified in some way, queue it
                    # XXX we may not want to do this for trivial changes,
                    # but not sure what trivial is
                    for cid in erratum['channels']:
                        errata_channel_ids.append(
                            (erratum.id, cid['channel_id']))

        if not errata_channel_ids:
            # Nothing to do
            return

        hdel = self.dbmodule.prepare("""
            delete from rhnErrataQueue where errata_id = :errata_id
        """)

        h = self.dbmodule.prepare("""
            insert into rhnErrataQueue (errata_id, channel_id, next_action)
            values (:errata_id, :channel_id, current_timestamp + numtodsinterval(:timeout, 'second'))
        """)
        errata_ids = [x[0] for x in errata_channel_ids]
        channel_ids = [x[1] for x in errata_channel_ids]
        timeouts = [timeout] * len(errata_ids)
        hdel.executemany(errata_id=errata_ids)
        return h.executemany(errata_id=errata_ids, channel_id=channel_ids,
                             timeout=timeouts)
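    # Scheduling sketch (added for illustration): next_action is set to
    # current_timestamp plus `timeout` seconds, so a delayed pickup of the
    # queued errata can be requested as, e.g.:
    #
    #   backend.queue_errata(errata_batch, timeout=600)   # run in ~10 minutes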
    def processChannels(self, channels, base_channels):
        childTables = [
            'rhnChannelFamilyMembers', 'rhnReleaseChannelMap',
        ]
        if base_channels:
            childTables.append('rhnDistChannelMap')
        self.__processObjectCollection(channels, 'rhnChannel', childTables,
                                       'channel_id', uploadForce=4, ignoreUploaded=1, forceVerify=1)

    def orgTrustExists(self, org_id, trust_id):
        sql = """
            select *
              from rhnTrustedOrgs
             where org_id = :org_id
               and org_trust_id = :trust_id
        """
        h = self.dbmodule.prepare(sql)
        h.execute(org_id=org_id, trust_id=trust_id)
        row = h.fetchone_dict()
        if row:
            return True
        return False

    def clearChannelTrusts(self, label):
        sql = """
            delete from rhnChannelTrust where channel_id =
            (select id from rhnChannel where label = :label)
        """
        h = self.dbmodule.prepare(sql)
        h.execute(label=label)

    def processChannelTrusts(self, channel_trusts):
        # Create channel trusts
        insert = [[], []]
        for trust in channel_trusts:
            insert[0].append(trust['channel-label'])
            insert[1].append(trust['org-id'])
        sql = """
            insert into rhnChannelTrust (channel_id, org_trust_id)
            values ((select id from rhnChannel where label = :label),
                    :org_id)
        """
        h = self.dbmodule.prepare(sql)
        h.executemany(label=insert[0], org_id=insert[1])

    def processChannelFamilies(self, channels):
        childTables = []
        self.__processObjectCollection(channels, 'rhnChannelFamily',
                                       childTables, 'channel_family_id', uploadForce=4, ignoreUploaded=1,
                                       forceVerify=1)

    def processChannelFamilyMembers(self, channel_families):
        # Channel families now contain channel memberships too
        h_lookup_cfid = self.dbmodule.prepare("""
            select channel_family_id
              from rhnChannelFamilyMembers
             where channel_id = :channel_id
        """)
        cf_ids = []
        c_ids = []
        for cf in channel_families:
            if 'private-channel-family' in cf['label']:
                # It's a private channel family, and the channel family
                # members will be different from the server's, as this is
                # most likely an ISS sync. Don't compare and delete custom
                # channel families.
                continue
            for cid in cf['channel_ids']:
                # Look up channel families for this channel
                h_lookup_cfid.execute(channel_id=cid)
                row = h_lookup_cfid.fetchone_dict()
                if row and row['channel_family_id'] == cf.id:
                    # Nothing to do here, we already have this mapping
                    continue
                # need to delete this entry and add the one for the new
                # channel family
                cf_ids.append(cf.id)
                c_ids.append(cid)
        if not c_ids:
            # We're done
            return

        hdel = self.dbmodule.prepare("""
            delete from rhnChannelFamilyMembers
             where channel_id = :channel_id
        """)
        hins = self.dbmodule.prepare("""
            insert into rhnChannelFamilyMembers (channel_id, channel_family_id)
            values (:channel_id, :channel_family_id)
        """)
        hdel.executemany(channel_id=c_ids)
        hins.executemany(channel_family_id=cf_ids, channel_id=c_ids)

    def processChannelFamilyPermissions(self, channel_families):
        # Since this is not evaluated in rhn_entitlements anymore,
        # make channel families without org globally visible

        cf_ids = [cf.id for cf in channel_families if 'private-channel-family' not in cf['label']]

        h_public_sel = self.dbmodule.prepare("""
            select channel_family_id from rhnPublicChannelFamily
        """)
        h_public_sel.execute()

        public_cf_in_db = [x['channel_family_id'] for x in h_public_sel.fetchall_dict() or []]
        public_cf_to_insert = [x for x in cf_ids if x not in public_cf_in_db]

        h_public_ins = self.dbmodule.prepare("""
            insert into rhnPublicChannelFamily (channel_family_id)
            values (:channel_family_id)
        """)
        h_public_ins.executemany(channel_family_id=public_cf_to_insert)

    def processDistChannelMap(self, dcms):
        dcmTable = self.tables['rhnDistChannelMap']
        lookup = TableLookup(dcmTable, self.dbmodule)
        dmlobj = DML([dcmTable.name], self.tables)

        for dcm in dcms:
            if dcm.ignored:
                # Skip it
                continue
            h = lookup.query(dcm)
            row = h.fetchone_dict()
            if not row:
                extObject = {}
                _buildExternalValue(extObject, dcm, dcmTable)
                addHash(dmlobj.insert[dcmTable.name], extObject)
            # Since this table has all the columns in unique constraints, we
            # don't care much about updates

        self.__doDML(dmlobj)
    def processChannelProduct(self, channel):
        """ Associate a product with the channel """

        channel['channel_product'] = channel['product_name']
        channel['channel_product_version'] = channel['product_version']
        channel['channel_product_beta'] = channel['product_beta']
        channel['channel_product_id'] = self.lookupChannelProduct(channel)

        if not channel['channel_product_id']:
            # If there is no channel product, don't update
            return
        statement = self.dbmodule.prepare("""
            UPDATE rhnChannel
               SET channel_product_id = :channel_product_id
             WHERE id = :id
               AND (channel_product_id is NULL
                    OR channel_product_id <> :channel_product_id)
        """)

        statement.execute(id=channel.id,
                          channel_product_id=channel['channel_product_id'])
    def processChannelContentSources(self, channel):
        """ Associate content sources with the channel """

        # Which content sources are currently assigned to this channel
        select_sql = self.dbmodule.prepare("""
            select source_id from rhnChannelContentSource
             where channel_id = :channel_id
        """)

        select_sql.execute(channel_id=channel.id)
        sources_in_db = [x['source_id'] for x in select_sql.fetchall_dict() or []]

        # Which content sources should be assigned to this channel
        sources_needed = []
        if 'content-sources' in channel and channel['content-sources']:
            for source in channel['content-sources']:
                sources_needed.append(self.lookupContentSource(source['label']))

        # What to delete and what to insert
        sources_to_delete = [x for x in sources_in_db if x not in sources_needed]
        sources_to_insert = [x for x in sources_needed if x not in sources_in_db]

        delete_sql = self.dbmodule.prepare("""
            delete from rhnChannelContentSource
             where source_id = :source_id
               and channel_id = :channel_id
        """)

        insert_sql = self.dbmodule.prepare("""
            insert into rhnChannelContentSource
                   (source_id, channel_id)
            values (:source_id, :channel_id)
        """)

        for source_id in sources_to_delete:
            delete_sql.execute(source_id=source_id, channel_id=channel.id)

        for source_id in sources_to_insert:
            insert_sql.execute(source_id=source_id, channel_id=channel.id)

    def processProductNames(self, batch):
        """ Check if the ProductName for each channel in the batch is already
            in the DB; if not, add it.
        """
        statement = self.dbmodule.prepare("""
            insert into rhnProductName
                   (id, label, name)
            values (sequence_nextval('rhn_productname_id_seq'),
                    :product_label, :product_name)
        """)

        for channel in batch:
            if not self.lookupProductNames(channel['label']):
                statement.execute(product_label=channel['label'],
                                  product_name=channel['name'])

    def processContentSources(self, batch):
        """ Insert content sources into the DB """

        childTables = ['rhnContentSourceSsl']
        self.__processObjectCollection(batch, 'rhnContentSource',
                                       childTables, 'content_source_id', uploadForce=4, ignoreUploaded=1,
                                       forceVerify=1)

    def lookupContentSource(self, label):
        """ Get the id for the given content source """

        sql = self.dbmodule.prepare("""
            select id from rhnContentSource where label = :label and org_id is null
        """)

        sql.execute(label=label)

        content_source = sql.fetchone_dict()

        if content_source:
            return content_source['id']

        return

    def lookupContentSourceType(self, label):
        """ Get the id for the given content type label """

        sql = self.dbmodule.prepare("""
            select id from rhnContentSourceType where label = :label
        """)

        sql.execute(label=label)

        source_type = sql.fetchone_dict()

        if source_type:
            return source_type['id']

        return

    def lookupProductNames(self, label):
        """ For the given product label, return its id.
            If the product does not exist, return None.
        """
        statement = self.dbmodule.prepare("""
            SELECT id
              FROM rhnProductName
             WHERE label = :label
        """)

        statement.execute(label=label)

        product = statement.fetchone_dict()

        if product:
            return product['id']

        return

    # bug #528227
    def lookupChannelOrg(self, label):
        """ For the given channel label, return its org_id.
            If a channel with the given label does not exist, or its org_id
            is NULL, return None.
        """
        statement = self.dbmodule.prepare("""
            SELECT org_id
              FROM rhnChannel
             WHERE label = :label
        """)

        statement.execute(label=label)
        org_id = statement.fetchone_dict()

        if org_id:
            return org_id

        return
    def lookupChannelProduct(self, channel):
        statement = self.dbmodule.prepare("""
            SELECT id
              FROM rhnChannelProduct
             WHERE product = :product
               AND version = :version
               AND beta = :beta
        """)

        statement.execute(product=channel['channel_product'],
                          version=channel['channel_product_version'],
                          beta=channel['channel_product_beta'])

        product = statement.fetchone_dict()

        if product:
            return product['id']

        return self.createChannelProduct(channel)

    def createChannelProduct(self, channel):
        id = self.sequences['rhnChannelProduct'].next()

        statement = self.dbmodule.prepare("""
            INSERT
              INTO rhnChannelProduct
                   (id, product, version, beta)
            VALUES (:id, :product, :version, :beta)
        """)

        statement.execute(id=id,
                          product=channel['channel_product'],
                          version=channel['channel_product_version'],
                          beta=channel['channel_product_beta'])

        return id
    def subscribeToChannels(self, packages, strict=0):
        hash = {
            'package_id': [],
            'channel_id': [],
        }
        # Keep a list of packages for each channel too, so we can easily
        # compare what's extra, if strict is 1
        channel_packages = {}
        sql = """
            select channel_id
              from rhnChannelPackage
             where package_id = :package_id"""
        affected_channels = {}
        statement = self.dbmodule.prepare(sql)
        for package in packages:
            if package.ignored:
                # Skip it
                continue
            if package.id is None:
                raise InvalidPackageError(package, "Invalid package")
            # Look it up first
            statement.execute(package_id=package.id)
            channels = {}
            while 1:
                row = statement.fetchone_dict()
                if not row:
                    break
                channels[row['channel_id']] = None

            for channelId in package['channels'].keys():
                # Build the channel-package list
                if channelId in channel_packages:
                    cp = channel_packages[channelId]
                else:
                    channel_packages[channelId] = cp = {}
                cp[package.id] = None

                if channelId in channels:
                    # Already subscribed
                    continue
                dict = {
                    'package_id': package.id,
                    'channel_id': channelId,
                }
                if channelId not in affected_channels:
                    modified_packages = ([], [])
                    affected_channels[channelId] = modified_packages
                else:
                    modified_packages = affected_channels[channelId]
                # Package was added to this channel
                modified_packages[0].append(package.id)
                addHash(hash, dict)

        # Packages we'd have to delete
        extra_cp = {
            'package_id': [],
            'channel_id': [],
        }
        if strict:
            # if strict, remove the extra packages from the DB
            sql = """
                select package_id
                  from rhnChannelPackage
                 where channel_id = :channel_id
            """
        else:
            # or at least we should delete packages from a different org
            sql = """
                select package_id
                  from rhnChannelPackage cp
                  join rhnPackage p
                    on p.id = cp.package_id
                  join rhnChannel c
                    on c.id = cp.channel_id
                 where cp.channel_id = :channel_id
                   and c.org_id != p.org_id
            """

        statement = self.dbmodule.prepare(sql)
        for channel_id, pid_hash in channel_packages.items():
            statement.execute(channel_id=channel_id)
            while 1:
                row = statement.fetchone_dict()
                if not row:
                    break
                package_id = row['package_id']
                if package_id not in pid_hash:
                    # Have to remove it
                    extra_cp['package_id'].append(package_id)
                    extra_cp['channel_id'].append(channel_id)
                    # And mark this channel as being affected
                    if channel_id not in affected_channels:
                        modified_packages = ([], [])
                        affected_channels[channel_id] = modified_packages
                    else:
                        modified_packages = affected_channels[channel_id]
                    # Package was deleted from this channel
                    modified_packages[1].append(package_id)

        self.__doDeleteTable('rhnChannelPackage', extra_cp)
        self.__doInsertTable('rhnChannelPackage', hash)
        # This function returns the channels that were affected
        return affected_channels
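    # Return-shape sketch (added; the caller label is illustrative): the
    # returned affected_channels maps each channel id to a pair of lists,
    # ([added_package_ids], [deleted_package_ids]), which is exactly what
    # update_newest_package_cache below consumes:
    #
    #   affected = backend.subscribeToChannels(packages, strict=1)
    #   backend.update_newest_package_cache('backend', affected)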
    def update_newest_package_cache(self, caller, affected_channels, name_ids=[]):
        # affected_channels is a hash keyed on the channel id, with a
        # tuple (added_package_list, deleted_package_list) as values
        refresh_newest_package = self.dbmodule.Procedure('rhn_channel.refresh_newest_package')
        update_channel = self.dbmodule.Procedure('rhn_channel.update_channel')
        for channel_id, (added_packages_list, deleted_packages_list) in affected_channels.items():
            try:
                if name_ids:
                    for id in name_ids:
                        refresh_newest_package(channel_id, caller, id)
                else:
                    refresh_newest_package(channel_id, caller, None)
            except rhnSQL.SQLError:
                e = sys.exc_info()[1]
                raise_with_tb(rhnFault(23, str(e[1]), explain=0), sys.exc_info()[2])
            if deleted_packages_list:
                invalidate_ss = 1
            else:
                invalidate_ss = 0
            update_channel(channel_id, invalidate_ss)

    def processSourcePackages(self, packages, uploadForce=0, ignoreUploaded=0,
                              forceVerify=0, transactional=0):
        # Insert/update the packages

        childTables = []

        for package in packages:
            if not isinstance(package, SourcePackage):
                raise TypeError("Expected a SourcePackage instance")

        # Process the packages

        self.__processObjectCollection(packages, 'rhnPackageSource', childTables,
                                       'package_id', uploadForce=uploadForce, forceVerify=forceVerify,
                                       ignoreUploaded=ignoreUploaded, severityLimit=1,
                                       transactional=transactional)
    def commit(self):
        self.dbmodule.commit()

    def rollback(self):
        self.dbmodule.rollback()
    def __processHash(self, lookup, hash):
        if not hash:
            # Nothing to do
            return

        h = rhnSQL.prepare("select " + lookup + "(:name) from dual")
        for k in hash.keys():
            h.execute(name=k)
            # saving id
            hash[k] = h.fetchone_dict().popitem()[1]
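    # Usage sketch (added; the SRPM name is illustrative): lookupSourceRPMs
    # and lookupPackageGroups above both funnel through this helper. The hash
    # maps each name to None, and the looked-up id is written back in place:
    #
    #   srpms = {'bash-4.2-46.el7.src.rpm': None}
    #   backend.lookupSourceRPMs(srpms)   # runs lookup_source_name(:name)
    #   # srpms['bash-4.2-46.el7.src.rpm'] is now the id from the database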
    def __buildQueries(self, childTables):
        childTableLookups = {}
        queryTempl = "select * from %s where %s = :id"
        for childTableName in childTables:
            childTableLookups[childTableName] = self.dbmodule.prepare(
                queryTempl % (childTableName, childTables[childTableName]))
        return childTableLookups

    def __processObjectCollection(self, objColl, parentTable, childTables=[],
                                  colname=None, **kwargs):
        # Returns the DML object that was processed
        # This helps identify what the changes were

        # XXX this is a total hack that keeps translating the old interface
        # into the new interface, to keep me from having to change all the
        # places in the code that call this method, as there are 10 of them...

        childDict = {}

        for tbl in childTables:
            childDict[tbl] = colname

        return self.__processObjectCollection__(objColl, parentTable, childDict, **kwargs)
    def __processObjectCollection__(self, objColl, parentTable, childTables={},
                                    **kwargs):
        # Returns the DML object that was processed
        # This helps identify what the changes were

        # FIXME I need to break this method apart into smaller method calls
        # that will allow *different* colname fields for different childTables
        # NOTE objColl == packages
        # Process the object collection, starting with parentTable, having
        # colname as a link column between the parent and child tables
        #
        # We create a DML object for the operations we're supposed to perform
        # on the database
        kwparams = {
            # The 'upload force'
            'uploadForce': 0,
            # Raises exceptions when the object is already uploaded
            'ignoreUploaded': 0,
            # Forces a full object verification - including the child tables
            'forceVerify': 0,
            # When the severity is below this limit, the object is not
            # updated
            'severityLimit': 0,
            # All-or-nothing
            'transactional': 0,
        }

        for k, v in kwargs.items():
            if k not in kwparams:
                raise TypeError("Unknown keyword parameter %s" % k)
            if v is not None:
                # Leave the default values in case of a None
                kwparams[k] = v

        uploadForce = kwparams['uploadForce']
        ignoreUploaded = kwparams['ignoreUploaded']
        severityLimit = kwparams['severityLimit']
        transactional = kwparams['transactional']
        forceVerify = kwparams['forceVerify']

        # All the tables affected
        tables = [parentTable] + list(childTables.keys())

        # Build the hash for the operations on the tables
        dml = DML(tables, self.tables)
        # Reverse hash: object id to object for already-uploaded objects
        uploadedObjects = {}
        # Information related to the parent table
        parentTableObj = self.tables[parentTable]
        ptFields = parentTableObj.getFields()
        severityHash = parentTableObj.getSeverityHash()

        # A flag that indicates if something has to be modified beyond the
        # current severity limit
        brokenTransaction = 0

        # Lookup object
        lookup = TableLookup(parentTableObj, self.dbmodule)
        # XXX
        childTableLookups = self.__buildQueries(childTables)
        # For each valid object in the collection, look it up:
        #   if it doesn't exist, insert all the associated information;
        #   if it already exists, save it in the uploadedObjects hash for
        #   later processing. The object's diff member will contain data if
        #   that object failed to push; the content should be explicit enough
        #   about what failed.
        # The object's diff_result should reliably say if the object was
        # different in any way, or if it was new. Each field that gets
        # compared will present its own severity field (or the default
        # one if not explicitly specified). The "global" severity is the
        # max of all severities.
        # New objects will have a diff level of -1
        for object in objColl:
            if object.ignored:
                # Skip it
                continue
            h = lookup.query(object)
            row = h.fetchone_dict()
            if not row:
                # Object does not exist
                id = self.sequences[parentTable].next()
                object.id = id
                extObject = {'id': id}
                _buildExternalValue(extObject, object, parentTableObj)
                addHash(dml.insert[parentTable], extObject)

                # Insert child table information
                for tname in childTables:
                    tbl = self.tables[tname]
                    # Get the list of objects for this package
                    entry_list = object[tbl.getAttribute()]
                    if entry_list is None:
                        continue
                    for entry in entry_list:
                        extObject = {childTables[tname]: id}
                        seq_col = tbl.sequenceColumn
                        if seq_col:
                            # This table has to insert values in a sequenced
                            # column; since it's a child table and the entry
                            # in the master table is not created yet, there
                            # shouldn't be a problem with uniqueness
                            # constraints
                            new_id = self.sequences[tbl.name].next()
                            extObject[seq_col] = new_id
                            # Make sure we initialize the object's sequenced
                            # column as well
                            entry[seq_col] = new_id
                        _buildExternalValue(extObject, entry, tbl)
                        addHash(dml.insert[tname], extObject)
                object.diff_result = Diff()
                # New object
                object.diff_result.level = -1
                continue

            # Already uploaded
            if not ignoreUploaded:
                raise AlreadyUploadedError(object, "Already uploaded")

            # XXX package id set here!!!!!!!!!!
            object.id = row['id']
            # And save the object and the row for later processing
            uploadedObjects[row['id']] = [object, row]

        # Deal with already-uploaded objects
        for objid, (object, row) in uploadedObjects.items():
            # Build the external value
            extObject = {'id': row['id']}
            _buildExternalValue(extObject, object, parentTableObj)
            # Build the DB value
            row = _buildDatabaseValue(row, ptFields)
            # compare them
            object.diff = object.diff_result = Diff()
            diffval = computeDiff(extObject, row, severityHash, object.diff)
            if not forceVerify:
                # If there is enough karma, force the full object check;
                # maybe they want the object overwritten
                if uploadForce < object.diff.level and diffval <= severityLimit:
                    # Same object, or not different enough;
                    # not enough karma either
                    continue

            localDML = self.__processUploaded(objid, object, childTables,
                                              childTableLookups)

            if uploadForce < object.diff.level:
                # Not enough karma
                if object.diff.level > severityLimit:
                    # Broken transaction - object is too different
                    brokenTransaction = 1
                continue

            # Clean up the object diff since we pushed the package
            object.diff = None

            if diffval:
                # Different parent object
                localDML['update'][parentTable] = [extObject]

            # And transfer the local DML to the global one
            for k, tablehash in localDML.items():
                dmlhash = getattr(dml, k)
                for tname, vallist in tablehash.items():
                    for val in vallist:
                        addHash(dmlhash[tname], val)

        if transactional and brokenTransaction:
            raise TransactionError("Error uploading package source batch")
        return self.__doDML(dml)
    def __processUploaded(self, objid, object, childTables, childTableLookups):
        # Store the DML operations locally
        localDML = {
            'insert': {},
            'update': {},
            'delete': {},
        }

        # Grab the rest of the information
        childTablesInfo = self.__getChildTablesInfo(objid, list(childTables.keys()),
                                                    childTableLookups)

        # Start computing deltas
        for childTableName in childTables:
            # Init the local hashes
            for k in ['insert', 'update', 'delete']:
                localDML[k][childTableName] = []

            dbside = childTablesInfo[childTableName]
            # The child table object
            childTableObj = self.tables[childTableName]
            # The name of the attribute in the parent object
            parentattr = childTableObj.getAttribute()
            # The list of entries associated with the attribute linked to
            # this table
            entrylist = object[parentattr]
            fields = childTableObj.getFields()
            pks = childTableObj.getPK()
            childSeverityHash = childTableObj.getSeverityHash()
            if entrylist is None:
                continue
            for ent in entrylist:
                # Build the primary key
                key = []
                for f in pks:
                    if f == childTables[childTableName]:
                        # Special-case it
                        key.append(objid)
                        continue
                    datatype = fields[f]
                    # Get the proper attribute name for this column
                    attr = childTableObj.getObjectAttribute(f)
                    key.append(sanitizeValue(ent[attr], datatype))
                key = tuple(key)
                # Build the value
                val = {childTables[childTableName]: objid}
                if childTableObj.sequenceColumn:
                    # Initialize the sequenced column with a dummy value
                    ent[childTableObj.sequenceColumn] = None
                _buildExternalValue(val, ent, childTableObj)

                # Look this value up
                if key not in dbside:
                    if childTableObj.sequenceColumn:
                        # Initialize the sequence column too
                        sc = childTableObj.sequenceColumn
                        nextid = self.sequences[childTableName].next()
                        val[sc] = ent[sc] = nextid
                    # This entry has to be inserted
                    object.diff.append((parentattr, val, None))
                    # XXX change to a default value
                    object.diff.setLevel(4)

                    localDML['insert'][childTableName].append(val)
                    continue

                # Already exists in the DB
                dbval = _buildDatabaseValue(dbside[key], fields)

                if childTableObj.sequenceColumn:
                    # Copy the sequenced value - we don't want it updated
                    sc = childTableObj.sequenceColumn
                    val[sc] = ent[sc] = dbval[sc]
                # check for updates
                diffval = computeDiff(val, dbval, childSeverityHash,
                                      object.diff, parentattr)
                if not diffval:
                    # Same value
                    del dbside[key]
                    continue

                # Different value; have to update the entry
                localDML['update'][childTableName].append(val)
                del dbside[key]

            if childTableName == 'rhnErrataPackage':
                continue

            # Anything else should be deleted
            for key, val in dbside.items():
                # Send only the PKs
                hash = {}
                for k in pks:
                    hash[k] = val[k]

                # XXX change to a default value
                object.diff.setLevel(4)

                localDML['delete'][childTableName].append(hash)
                object.diff.append((parentattr, None, val))

        return localDML
    def __doDML(self, dml):
        self.__doDelete(dml.delete, dml.tables)
        self.__doUpdate(dml.update, dml.tables)
        self.__doInsert(dml.insert, dml.tables)
        return dml

    def __doInsert(self, hash, tables):
        for tname in tables:
            dict = hash[tname]
            try:
                self.__doInsertTable(tname, dict)
            except rhnSQL.SQLError:
                e = sys.exc_info()[1]
                raise_with_tb(rhnFault(54, str(e[1]), explain=0), sys.exc_info()[2])

    def __doInsertTable(self, table, hash):
        if not hash:
            return
        tab = self.tables[table]
        k = list(hash.keys())[0]
        if not hash[k]:
            # Nothing to do
            return

        insertObj = TableInsert(tab, self.dbmodule)
        insertObj.query(hash)
        return

    def __doDelete(self, hash, tables):
        for tname in tables:
            dict = hash[tname]
            self.__doDeleteTable(tname, dict)

    def __doDeleteTable(self, tname, hash):
        if not hash:
            return
        tab = self.tables[tname]
        # Need to extract the primary keys and look for items to delete only
        # in those columns, the other ones may not be populated
        # See bug 154216 for details (misa 2005-04-08)
        pks = tab.getPK()
        k = pks[0]
        if not hash[k]:
            # Nothing to do
            return
        deleteObj = TableDelete(tab, self.dbmodule)
        deleteObj.query(hash)

    def __doUpdate(self, hash, tables):
        for tname in tables:
            dict = hash[tname]
            self.__doUpdateTable(tname, dict)

    def __doUpdateTable(self, tname, hash):
        if not hash:
            return
        tab = self.tables[tname]
        # See bug 154216 for details (misa 2005-04-08)
        pks = tab.getPK()
        k = pks[0]
        if not hash[k]:
            # Nothing to do
            return
        updateObj = TableUpdate(tab, self.dbmodule)
        updateObj.query(hash)
        return

    def __lookupObjectCollection(self, objColl, tableName, ignore_missing=0):
        # Looks the object up in tableName, and fills in its id
        lookup = TableLookup(self.tables[tableName], self.dbmodule)
        for object in objColl:
            if object.ignored:
                # Skip it
                continue
            h = lookup.query(object)
            row = h.fetchone_dict()
            if not row:
                if ignore_missing:
                    # Ignore the missing objects
                    object.ignored = 1
                    continue
                # Invalid
                raise InvalidPackageError(object, "Could not find object %s in table %s" % (object, tableName))
            object.id = row['id']

    def __getChildTablesInfo(self, id, tables, queries):
        # Returns a hash with the information about package id from tables
        result = {}
        for tname in tables:
            tableobj = self.tables[tname]
            fields = tableobj.getFields()
            q = queries[tname]
            q.execute(id=id)
            hash = {}
            while 1:
                row = q.fetchone_dict()
                if not row:
                    break
                pks = tableobj.getPK()
                key = []
                for f in pks:
                    value = row[f]
                    datatype = fields[f]
                    value = sanitizeValue(value, datatype)
                    key.append(value)
                val = {}
                for f, datatype in fields.items():
                    value = row[f]
                    value = sanitizeValue(value, datatype)
                    val[f] = value
                hash[tuple(key)] = val

            result[tname] = hash
        return result

    def __populateTable(self, table_name, data, delete_extra=1):
        table = self.tables[table_name]
        fields = table.getFields()
        # Build a hash with the incoming data
        incoming = {}
        for entry in data:
            t = hash2tuple(entry, fields)
            incoming[t] = entry

        # Build the query to dump the table's contents
        h = self.dbmodule.prepare("select * from %s" % table.name)
        h.execute()
        deletes = {}
        inserts = {}
        for f in fields.keys():
            inserts[f] = []
            deletes[f] = []

        while 1:
            row = h.fetchone_dict()
            if not row:
                break

            t = hash2tuple(row, fields)
            if t in incoming:
                # we already have this value uploaded
                del incoming[t]
                continue
            addHash(deletes, row)

        for row in incoming.values():
            addHash(inserts, row)

        if delete_extra:
            self.__doDeleteTable(table.name, deletes)
        self.__doInsertTable(table.name, inserts)
    # This function does a diff on the specified table name for the presented
    # data, using uq_fields as the unique fields
    def _do_diff(self, data, table_name, uq_fields, fields):
        first_uq_col = uq_fields[0]
        uq_col_values = {}
        all_fields = uq_fields + fields
        for entry in data:
            for f in all_fields:
                if f not in entry:
                    raise Exception("Missing field %s" % f)
            val = entry[first_uq_col]
            if val not in uq_col_values:
                valhash = {}
                uq_col_values[val] = valhash
            else:
                valhash = uq_col_values[val]
            key = build_key(entry, uq_fields)
            valhash[key] = entry

        query = "select %s from %s where %s = :%s" % (
            string.join(all_fields, ", "),
            table_name,
            first_uq_col, first_uq_col,
        )
        h = self.dbmodule.prepare(query)
        updates = []
        deletes = []
        for val, valhash in uq_col_values.items():
            params = {first_uq_col: val}
            h.execute(**params)
            while 1:
                row = h.fetchone_dict()
                if not row:
                    break
                key = build_key(row, uq_fields)
                if key not in valhash:
                    # Need to delete this one
                    deletes.append(row)
                    continue
                entry = valhash[key]
                for f in fields:
                    if entry[f] != row[f]:
                        # Different, we have to update
                        break
                else:
                    # Same value, remove it from valhash
                    del valhash[key]
                    continue
                # Need to update
                updates.append(entry)

        inserts = []
        list(map(inserts.extend, [list(x.values()) for x in list(uq_col_values.values())]))

        if deletes:
            params = transpose(deletes, uq_fields)
            query = "delete from %s where %s" % (
                table_name,
                string.join(["%s = :%s" % (x, x) for x in uq_fields],
                            ' and '),
            )
            h = self.dbmodule.prepare(query)
            h.executemany(**params)
        if inserts:
            params = transpose(inserts, all_fields)
            query = "insert into %s (%s) values (%s)" % (
                table_name,
                string.join(all_fields, ', '),
                string.join([":" + x for x in all_fields], ', '),
            )
            h = self.dbmodule.prepare(query)
            h.executemany(**params)
        if updates:
            params = transpose(updates, all_fields)
            query = "update %s set %s where %s" % (
                table_name,
                string.join(["%s = :%s" % (x, x) for x in fields],
                            ', '),
                string.join(["%s = :%s" % (x, x) for x in uq_fields],
                            ' and '),
            )
            h = self.dbmodule.prepare(query)
            h.executemany(**params)
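    # Behavior sketch (added; the table and field names are made up): rows
    # present in `data` but missing from the table are inserted, table rows
    # absent from `data` are deleted, and rows whose non-unique fields changed
    # are updated:
    #
    #   backend._do_diff(entries, 'rhnExampleTable', ['label'], ['name'])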
    def validate_pks(self):
        # If nevra is enabled use checksum as primary key
        tbs = self.tables['rhnPackage']
        if not CFG.ENABLE_NVREA:
            # remove checksum from a primary key if nevra is disabled.
            if 'checksum_id' in tbs.pk:
                tbs.pk.remove('checksum_id')


# Returns a tuple for the hash's values
def build_key(hash, fields):
    return tuple(map(lambda x, h=hash: h[x], fields))
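# Example (added for illustration): build_key({'id': 3, 'label': 'x'},
# ['label', 'id']) returns ('x', 3), i.e. the hash's values in the order
# given by fields.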
def transpose(arrhash, fields):
    params = {}
    for f in fields:
        params[f] = []
    for h in arrhash:
        for f in fields:
            params[f].append(h[f])
    return params
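# Example (added for illustration): transpose turns a list of row-hashes into
# per-column lists, the layout executemany expects:
#
#   transpose([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}], ['a', 'b'])
#   # => {'a': [1, 3], 'b': [2, 4]}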
def hash2tuple(hash, fields):
    # Converts the hash into a tuple, with the fields ordered as presented in
    # the fields list
    result = []
    for fname, ftype in fields.items():
        result.append(sanitizeValue(hash[fname], ftype))
    return tuple(result)
class DML:

    def __init__(self, tables, tableHash):
        self.update = {}
        self.delete = {}
        self.insert = {}
        self.tables = tables
        for k in ('insert', 'update', 'delete'):
            dmlhash = {}
            setattr(self, k, dmlhash)
            for tname in tables:
                hash = {}
                for f in tableHash[tname].getFields().keys():
                    hash[f] = []
                dmlhash[tname] = hash
def _buildDatabaseValue(row, fieldsHash):
    # Returns a dictionary containing the interesting values of the row,
    # sanitized
    dict = {}
    for f, datatype in fieldsHash.items():
        dict[f] = sanitizeValue(row[f], datatype)
    return dict
def _buildExternalValue(dict, entry, tableObj):
    # updates dict with values from entry
    # entry is a hash-like object (non-db)
    for f, datatype in tableObj.getFields().items():
        if f in dict:
            # initialized somewhere else
            continue
        # Get the attribute's name
        attr = tableObj.getObjectAttribute(f)
        # Sanitize the value according to its datatype
        if attr not in entry:
            entry[attr] = None
        dict[f] = sanitizeValue(entry[attr], datatype)
def computeDiff(hash1, hash2, diffHash, diffobj, prefix=None):
    # Compare if the key-values of hash1 are a subset of hash2's
    difference = 0
    ignore_keys = ['last_modified']

    for k, v in hash1.items():
        if k in ignore_keys:
            # Don't base the diff on last_modified, as it obviously
            # won't match due to our db triggers.
            continue
        if hash2[k] == v:
            # Same values
            continue
        if k == 'installed_size' and v is not None and hash2[k] is None:
            # Skip installed_size, which might not have been populated
            continue
        if k in diffHash:
            diffval = diffHash[k]
            if diffval == 0:
                # Completely ignore this key
                continue
        else:
            diffval = diffobj.level + 1

        if prefix:
            diffkey = prefix + '::' + k
        else:
            diffkey = k

        diffobj.setLevel(diffval)
        diffobj.append((diffkey, v, hash2[k]))

    difference = diffobj.level

    return difference
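# Worked example (added; values are illustrative): comparing an incoming hash
# against a database row, with a severity hash that silences 'epoch':
#
#   diff = Diff()
#   computeDiff({'name': 'bash', 'epoch': '1'},
#               {'name': 'bash-new', 'epoch': '0'},
#               {'epoch': 0}, diff)
#   # 'epoch' is skipped (severity 0); 'name' differs, so diff records
#   # ('name', 'bash', 'bash-new'), diff.level rises per Diff.setLevel in
#   # importLib, and that level is the returned value.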