#! /usr/bin/python -tt
"""
Assorted utility functions for yum.
"""
import types
import os
import sys
import os.path
from cStringIO import StringIO
import base64
import binascii
import struct
import re
import errno
import Errors
import constants
import pgpmsg
import tempfile
import glob
import pwd
import fnmatch
import bz2
import gzip
import shutil
import urllib
import string
import platform
_available_compression = ['gz', 'bz2']
try:
import lzma
_available_compression.append('xz')
except ImportError:
lzma = None
from rpmUtils.miscutils import stringToVersion, flagToString
from stat import *
class GpgmeAdapter(object):
"""Wrapper for the old gpg API."""
class errors(object):
class GPGMEError(Exception):
pass
class BadSignatures(Exception):
pass
class Context(object):
def __init__(self):
self.ctx = gpgme.Context()
def __enter__(self):
return self
def __exit__(self, *args):
pass
def op_import(self, rawkey):
keyf = StringIO(rawkey)
imp = self.ctx.import_(keyf)
keyf.close()
# Ultimately trust the keys
for import_status in imp.imports:
fpr = import_status[0]
key = self.ctx.get_key(fpr)
gpgme.editutil.edit_trust(self.ctx, key, gpgme.VALIDITY_ULTIMATE)
def verify(self, signed_text, sig, plaintext):
try:
sigs = self.ctx.verify(sig, signed_text, plaintext)
except gpgme.GpgmeError as e:
raise GpgmeAdapter.errors.GPGMEError()
for sig in sigs:
# Check that at least one sig is recognized as valid.
if sig.validity in (
gpgme.VALIDITY_FULL, gpgme.VALIDITY_MARGINAL,
gpgme.VALIDITY_ULTIMATE):
return
raise GpgmeAdapter.errors.BadSignatures()
def __getattr__(self, name):
return getattr(self.ctx, name)
def __getattr__(self, name):
return getattr(gpgme, name)
try:
# Official GnuPG Python binding (not available on CentOS/RHEL <= 7)
import gpg
except ImportError:
# Alternative fallback implementation (not available on Fedora any more)
import gpgme
import gpgme.editutil
gpg = GpgmeAdapter()
try:
import hashlib
_available_checksums = set(['md5', 'sha1', 'sha256', 'sha384', 'sha512'])
except ImportError:
# Python-2.4.z ... gah!
import sha
import md5
_available_checksums = set(['md5', 'sha1'])
class hashlib:
@staticmethod
def new(algo):
if algo == 'md5':
return md5.new()
if algo == 'sha1':
return sha.new()
raise ValueError, "Bad checksum type"
# some checksum types might be disabled
_fips_noncompliant = set()
for ctype in list(_available_checksums):
try:
hashlib.new(ctype)
except Exception as e:
# Print an error unless this is due to FIPS mode (in which case it's
# not really an error and we don't want to pollute the output
# needlessly; if someone actually tries to instantiate a Checksum with
# a FIPS non-compliant ctype, we'll raise an explanatory exception
# anyway).
if isinstance(e, ValueError) and str(e).endswith('disabled for fips'):
_fips_noncompliant.add(ctype)
else:
print >> sys.stderr, 'Checksum type %s disabled' % repr(ctype)
_available_checksums.remove(ctype)
for ctype in 'sha256', 'sha1':
if ctype in _available_checksums:
_default_checksums = [ctype]
break
else:
raise ImportError, 'broken hashlib'
from Errors import MiscError, FIPSNonCompliantError
# These are API things, so we can't remove them even if they aren't used here.
# pylint: disable-msg=W0611
from i18n import to_utf8, to_unicode
# pylint: enable-msg=W0611
_share_data_store = {}
_share_data_store_u = {}
def share_data(value):
""" Take a value and use the same value from the store,
if the value isn't in the store this one becomes the shared version. """
# We don't want to change the types of strings, between str <=> unicode
# and hash('a') == hash(u'a') ... so use different stores.
# In theory eventually we'll have all of one type, but don't hold your breath.
store = _share_data_store
if isinstance(value, unicode):
store = _share_data_store_u
# hahahah, of course the above means that:
# hash(('a', 'b')) == hash((u'a', u'b'))
# ...which we have in deptuples, so just screw sharing those atm.
if type(value) == types.TupleType:
return value
return store.setdefault(value, value)
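# Illustrative usage of share_data() (not part of the original module): equal
# values collapse to one shared object, so repeated metadata strings are stored
# only once (tuples are passed through unshared, as noted above):
#   a = share_data(''.join(['f', 'oo']))
#   b = share_data('foo')
#   a is b  ->  True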
def unshare_data():
global _share_data_store
global _share_data_store_u
_share_data_store = {}
_share_data_store_u = {}
_re_compiled_glob_match = None
def re_glob(s):
""" Tests if a string is a shell wildcard. """
# TODO/FIXME maybe consider checking if it is a stringsType before going on - otherwise
# returning None
global _re_compiled_glob_match
if _re_compiled_glob_match is None:
_re_compiled_glob_match = re.compile('[*?]|\[.+\]').search
return _re_compiled_glob_match(s)
def compile_pattern(pat, ignore_case=False):
""" Compile shell wildcards, return a 'match' function. """
if re_glob(pat):
try:
flags = ignore_case and re.I or 0
return re.compile(fnmatch.translate(pat), flags).match
except re.error:
pass # fall back to exact match
if ignore_case:
pat = pat.lower()
return lambda s: s.lower() == pat
return lambda s: s == pat
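# Illustrative usage of compile_pattern() (not part of the original module):
# the returned callable behaves like re.match for glob patterns and like a
# plain equality test otherwise:
#   compile_pattern('yum*', ignore_case=True)('YUM-utils')  ->  truthy match
#   compile_pattern('bash')('bash')                         ->  True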
_re_compiled_filename_match = None
def re_filename(s):
""" Tests if a string could be a filename. We still get negated character
classes wrong (are they supported), and ranges in character classes. """
global _re_compiled_filename_match
if _re_compiled_filename_match is None:
_re_compiled_filename_match = re.compile('[/*?]|\[[^]]*/[^]]*\]').match
return _re_compiled_filename_match(s)
def re_primary_filename(filename):
""" Tests if a filename string, can be matched against just primary.
Note that this can produce false negatives (Eg. /b?n/zsh) but not false
positives (because the former is a perf hit, and the later is a
failure). Note that this is a superset of re_primary_dirname(). """
if re_primary_dirname(filename):
return True
if filename == '/usr/lib/sendmail':
return True
return False
def re_primary_dirname(dirname):
""" Tests if a dirname string, can be matched against just primary. Note
that this is a subset of re_primary_filename(). """
if 'bin/' in dirname:
return True
if dirname.startswith('/etc/'):
return True
return False
_re_compiled_full_match = None
def re_full_search_needed(s):
""" Tests if a string needs a full nevra match, instead of just name. """
global _re_compiled_full_match
if _re_compiled_full_match is None:
# A glob, or a "." or "-" separator, followed by something (the ".")
one = re.compile('.*([-.*?]|\[.+\]).').match
# Any epoch, for envra
two = re.compile('[0-9]+:').match
_re_compiled_full_match = (one, two)
for rec in _re_compiled_full_match:
if rec(s):
return True
return False
def re_remote_url(s):
""" Tests if a string is a "remote" URL, http, https, ftp. """
s = s.lower()
if s.startswith("http:https://"):
return True
if s.startswith("https://"):
return True
if s.startswith("ftp:https://"):
return True
return False
###########
# Title: Remove duplicates from a sequence
# Submitter: Tim Peters
# From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
def unique(s):
"""Return a list of the elements in s, but without duplicates.
For example, unique([1,2,3,1,2,3]) is some permutation of [1,2,3],
unique("abcabc") some permutation of ["a", "b", "c"], and
unique(([1, 2], [2, 3], [1, 2])) some permutation of
[[2, 3], [1, 2]].
For best speed, all sequence elements should be hashable. Then
unique() will usually work in linear time.
If not possible, the sequence elements should enjoy a total
ordering, and if list(s).sort() doesn't raise TypeError it's
assumed that they do enjoy a total ordering. Then unique() will
usually work in O(N*log2(N)) time.
If that's not possible either, the sequence elements must support
equality-testing. Then unique() will usually work in quadratic
time.
"""
n = len(s)
if n == 0:
return []
# Try using a set first, as that's the fastest and will usually
# work. If it doesn't work, it will usually fail quickly, so it
# usually doesn't cost much to *try* it. It requires that all the
# sequence elements be hashable, and support equality comparison.
try:
u = set(s)
except TypeError:
pass
else:
return list(u)
# We can't hash all the elements. Second fastest is to sort,
# which brings the equal elements together; then duplicates are
# easy to weed out in a single pass.
# NOTE: Python's list.sort() was designed to be efficient in the
# presence of many duplicate elements. This isn't true of all
# sort functions in all languages or libraries, so this approach
# is more effective in Python than it may be elsewhere.
try:
t = list(s)
t.sort()
except TypeError:
del t # move on to the next method
else:
assert n > 0
last = t[0]
lasti = i = 1
while i < n:
if t[i] != last:
t[lasti] = last = t[i]
lasti += 1
i += 1
return t[:lasti]
# Brute force is all that's left.
u = []
for x in s:
if x not in u:
u.append(x)
return u
class Checksums:
""" Generate checksum(s), on given pieces of data. Producing the
Length and the result(s) when complete. """
def __init__(self, checksums=None, ignore_missing=False, ignore_none=False):
if checksums is None:
checksums = _default_checksums
self._sumalgos = []
self._sumtypes = []
self._len = 0
done = set()
for sumtype in checksums:
if sumtype == 'sha':
sumtype = 'sha1'
if sumtype in done:
continue
if sumtype in _available_checksums:
sumalgo = hashlib.new(sumtype)
elif ignore_missing:
continue
elif sumtype in _fips_noncompliant:
raise FIPSNonCompliantError(sumtype)
else:
raise MiscError, 'Error Checksumming, bad checksum type %s' % sumtype
done.add(sumtype)
self._sumtypes.append(sumtype)
self._sumalgos.append(sumalgo)
if not done and not ignore_none:
raise MiscError, 'Error Checksumming, no valid checksum type'
def __len__(self):
return self._len
# Note that len(x) is assert limited to INT_MAX, which is 2GB on i686.
length = property(fget=lambda self: self._len)
def update(self, data):
self._len += len(data)
for sumalgo in self._sumalgos:
sumalgo.update(data)
def read(self, fo, size=2**16):
data = fo.read(size)
self.update(data)
return data
def hexdigests(self):
ret = {}
for sumtype, sumdata in zip(self._sumtypes, self._sumalgos):
ret[sumtype] = sumdata.hexdigest()
return ret
def hexdigest(self, checksum=None):
if checksum is None:
if not self._sumtypes:
return None
checksum = self._sumtypes[0]
if checksum == 'sha':
checksum = 'sha1'
return self.hexdigests()[checksum]
def digests(self):
ret = {}
for sumtype, sumdata in zip(self._sumtypes, self._sumalgos):
ret[sumtype] = sumdata.digest()
return ret
def digest(self, checksum=None):
if checksum is None:
if not self._sumtypes:
return None
checksum = self._sumtypes[0]
if checksum == 'sha':
checksum = 'sha1'
return self.digests()[checksum]
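# Illustrative usage of the Checksums class above (not part of the original
# module); a sketch assuming 'sha256' is present in _available_checksums:
#   csum = Checksums(['sha256'])
#   csum.update('some data')
#   len(csum)                 ->  9 (bytes seen so far)
#   csum.hexdigest('sha256')  ->  hex digest of 'some data'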
class AutoFileChecksums:
""" Generate checksum(s), on given file/fileobject. Pretending to be a file
object (overrrides read). """
def __init__(self, fo, checksums, ignore_missing=False, ignore_none=False):
self._fo = fo
self.checksums = Checksums(checksums, ignore_missing, ignore_none)
def __getattr__(self, attr):
return getattr(self._fo, attr)
def read(self, size=-1):
return self.checksums.read(self._fo, size)
def checksum(sumtype, file, CHUNK=2**16, datasize=None):
"""takes filename, hand back Checksum of it
sumtype = md5 or sha/sha1/sha256/sha512 (note sha == sha1)
filename = /path/to/file
CHUNK=65536 by default"""
# chunking brazenly lifted from Ryan Tomayko
try:
if type(file) not in types.StringTypes:
fo = file # assume it's a file-like-object
else:
fo = open(file, 'r')
data = Checksums([sumtype])
while data.read(fo, CHUNK):
if datasize is not None and data.length > datasize:
break
if type(file) is types.StringType:
fo.close()
# This screws up the length, but that shouldn't matter. We only care
# if this checksum == what we expect.
if datasize is not None and datasize != data.length:
return '!%u!%s' % (datasize, data.hexdigest(sumtype))
return data.hexdigest(sumtype)
except (IOError, OSError), e:
raise MiscError, 'Error opening file for checksum: %s' % file
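# Illustrative usage of checksum() (not part of the original module); both
# path names and file-like objects are accepted, '/etc/hosts' is just an
# example path:
#   checksum('sha256', '/etc/hosts')      ->  hex digest string
#   checksum('sha1', open('/etc/hosts'))  ->  same idea, file object input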
def getFileList(path, ext, filelist):
"""Return all files in path matching ext, store them in filelist,
recurse dirs return list object"""
extlen = len(ext)
try:
dir_list = os.listdir(path)
except OSError, e:
raise MiscError, ('Error accessing directory %s, %s') % (path, e)
for d in dir_list:
if os.path.isdir(path + '/' + d):
filelist = getFileList(path + '/' + d, ext, filelist)
else:
if not ext or d[-extlen:].lower() == '%s' % (ext):
newpath = os.path.normpath(path + '/' + d)
filelist.append(newpath)
return filelist
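# Illustrative usage of getFileList() (not part of the original module); the
# directory and extension are arbitrary examples. Pass the extension
# lower-cased, since only the candidate filename is lowered before comparing:
#   getFileList('/etc/yum.repos.d', '.repo', [])
#     ->  recursive list of paths ending in '.repo'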
class GenericHolder:
"""Generic Holder class used to hold other objects of known types
It exists purely to be able to do object.somestuff, object.someotherstuff
or object[key] and pass object to another function that will
understand it"""
def __init__(self, iter=None):
self.__iter = iter
def __iter__(self):
if self.__iter is not None:
return iter(self[self.__iter])
def __getitem__(self, item):
if hasattr(self, item):
return getattr(self, item)
else:
raise KeyError, item
def procgpgkey(rawkey):
'''Convert ASCII armoured GPG key to binary
'''
# TODO: CRC checking? (will RPM do this anyway?)
# Normalise newlines
rawkey = re.sub('\r\n?', '\n', rawkey)
# Extract block
block = StringIO()
inblock = 0
pastheaders = 0
for line in rawkey.split('\n'):
if line.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----'):
inblock = 1
elif inblock and line.strip() == '':
pastheaders = 1
elif inblock and line.startswith('-----END PGP PUBLIC KEY BLOCK-----'):
# Hit the end of the block, get out
break
elif pastheaders and line.startswith('='):
# Hit the CRC line, don't include this and stop
break
elif pastheaders:
block.write(line+'\n')
# Decode and return
return base64.decodestring(block.getvalue())
def gpgkey_fingerprint_ascii(info, chop=4):
''' Given key_info data from getgpgkeyinfo(), return an ASCII
fingerprint. Chop it into groups of 4 characters, as that is what GPG does. '''
# First "duh" ... it's a method...
fp = info['fingerprint']()
fp = binascii.hexlify(fp)
if chop:
fp = [fp[i:i+chop] for i in range(0, len(fp), chop)]
fp = " ".join(fp)
return fp
def getgpgkeyinfo(rawkey, multiple=False):
'''Return a dict of info for the given ASCII armoured key text
Returned dict will have the following keys: 'userid', 'keyid', 'timestamp', 'fingerprint', 'raw_key', 'has_sig' and 'valid_sig'
Will raise ValueError if there was a problem decoding the key.
'''
# Catch all exceptions as there can be quite a variety raised by this call
key_info_objs = []
try:
keys = pgpmsg.decode_multiple_keys(rawkey)
except Exception, e:
raise ValueError(str(e))
if len(keys) == 0:
raise ValueError('No key found in given key data')
for key in keys:
keyid_blob = key.public_key.key_id()
info = {
'userid': key.user_id,
'keyid': struct.unpack('>Q', keyid_blob)[0],
'timestamp': key.public_key.timestamp,
'fingerprint' : key.public_key.fingerprint,
'raw_key' : key.raw_key,
'has_sig' : False,
'valid_sig': False,
}
# Retrieve the timestamp from the matching signature packet
# (this is what RPM appears to do)
for userid in key.user_ids[0]:
if not isinstance(userid, pgpmsg.signature):
continue
if userid.key_id() == keyid_blob:
# Get the creation time sub-packet if available
if hasattr(userid, 'hashed_subpaks'):
tspkt = \
userid.get_hashed_subpak(pgpmsg.SIG_SUB_TYPE_CREATE_TIME)
if tspkt != None:
info['timestamp'] = int(tspkt[1])
break
key_info_objs.append(info)
if multiple:
return key_info_objs
else:
return key_info_objs[0]
def keyIdToRPMVer(keyid):
'''Convert an integer representing a GPG key ID to the hex version string
used by RPM
'''
return "%08x" % (keyid & 0xffffffffL)
def keyInstalled(ts, keyid, timestamp):
'''
Return whether the GPG key described by the given keyid and timestamp is
installed in the rpmdb.
The keyid and timestamp should both be passed as integers.
The ts is an rpm transaction set object
Return values:
- -1 key is not installed
- 0 key with matching ID and timestamp is installed
- 1 key with matching ID is installed but has an older timestamp
- 2 key with matching ID is installed but has a newer timestamp
No effort is made to handle duplicates. The first matching keyid is used to
calculate the return result.
'''
# Convert key id to 'RPM' form
keyid = keyIdToRPMVer(keyid)
# Search
for hdr in ts.dbMatch('name', 'gpg-pubkey'):
if hdr['version'] == keyid:
installedts = int(hdr['release'], 16)
if installedts == timestamp:
return 0
elif installedts < timestamp:
return 1
else:
return 2
return -1
def import_key_to_pubring(rawkey, keyid, cachedir=None, gpgdir=None, make_ro_copy=True):
# FIXME - cachedir can be removed from this method when we break api
if not gpgdir:
gpgdir = '%s/gpgdir' % cachedir
if not os.path.exists(gpgdir):
if os.geteuid() != 0:
return False
os.makedirs(gpgdir)
os.environ['GNUPGHOME'] = gpgdir
# import the key
fp = open(os.path.join(gpgdir, 'gpg.conf'), 'wb')
fp.write('')
fp.close()
with gpg.Context() as ctx:
ctx.op_import(rawkey)
if make_ro_copy:
rodir = gpgdir + '-ro'
if not os.path.exists(rodir):
os.makedirs(rodir, mode=0755)
for f in glob.glob(gpgdir + '/*'):
if not os.path.isfile(f):
# This is needed as gpg-agent puts some dirs/sockets in the
# gpgdir (we don't need to copy them anyway), see:
# https://www.gnupg.org/faq/whats-new-in-2.1.html
continue
basename = os.path.basename(f)
ro_f = rodir + '/' + basename
shutil.copy(f, ro_f)
os.chmod(ro_f, 0755)
fp = open(rodir + '/gpg.conf', 'w', 0755)
# yes it is this stupid, why do you ask?
opts="""lock-never
no-auto-check-trustdb
trust-model direct
no-expensive-trust-checks
no-permission-warning
preserve-permissions
"""
fp.write(opts)
fp.close()
return True
def return_keyids_from_pubring(gpgdir):
if not os.path.exists(gpgdir):
return []
os.environ['GNUPGHOME'] = gpgdir
ctx = gpg.Context()
keyids = []
for k in ctx.keylist():
for subkey in k.subkeys:
if subkey.can_sign:
keyids.append(subkey.keyid)
return keyids
def valid_detached_sig(sig_file, signed_file, gpghome=None):
"""takes signature , file that was signed and an optional gpghomedir"""
if gpghome:
if not os.path.exists(gpghome):
return False
os.environ['GNUPGHOME'] = gpghome
if hasattr(sig_file, 'read'):
sig = sig_file
else:
sig = open(sig_file, 'r')
if hasattr(signed_file, 'read'):
signed_text = signed_file
else:
signed_text = open(signed_file, 'r')
plaintext = None
ctx = gpg.Context()
try:
ctx.verify(signed_text, sig, plaintext)
except (gpg.errors.GPGMEError, gpg.errors.BadSignatures):
return False
else:
return True
def getCacheDir(tmpdir='/var/tmp', reuse=True, prefix='yum-'):
"""return a path to a valid and safe cachedir - only used when not running
as root or when --tempcache is set"""
uid = os.geteuid()
try:
usertup = pwd.getpwuid(uid)
username = usertup[0]
# we prefer ascii-only paths
username = urllib.quote(username)
except KeyError:
return None # if it returns None then, well, it's bollocksed
if reuse:
# check for /var/tmp/yum-username-* -
prefix = '%s%s-' % (prefix, username)
dirpath = '%s/%s*' % (tmpdir, prefix)
cachedirs = sorted(glob.glob(dirpath))
for thisdir in cachedirs:
stats = os.lstat(thisdir)
if S_ISDIR(stats[0]) and S_IMODE(stats[0]) == 448 and stats[4] == uid:
return thisdir
# make the dir (tempfile.mkdtemp())
cachedir = tempfile.mkdtemp(prefix=prefix, dir=tmpdir)
return cachedir
def sortPkgObj(pkg1, pkg2):
"""sorts a list of yum package objects by name"""
if pkg1.name > pkg2.name:
return 1
elif pkg1.name == pkg2.name:
return 0
else:
return -1
def newestInList(pkgs):
""" Return the newest in the list of packages. """
ret = [ pkgs.pop() ]
newest = ret[0]
for pkg in pkgs:
if pkg.verGT(newest):
ret = [ pkg ]
newest = pkg
elif pkg.verEQ(newest):
ret.append(pkg)
return ret
def version_tuple_to_string(evrTuple):
"""
Convert a tuple representing a package version to a string.
@param evrTuple: A 3-tuple of epoch, version, and release.
Return the string representation of evrTuple.
"""
(e, v, r) = evrTuple
s = ""
if e not in [0, '0', None]:
s += '%s:' % e
if v is not None:
s += '%s' % v
if r is not None:
s += '-%s' % r
return s
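# Illustrative usage of version_tuple_to_string() (not part of the original
# module):
#   version_tuple_to_string(('0', '3.4.3', '167.fc99'))  ->  '3.4.3-167.fc99'
#   version_tuple_to_string(('2', '1.0', None))          ->  '2:1.0'
# (a zero/None epoch is omitted, a None release drops the '-release' part)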
def prco_tuple_to_string(prcoTuple):
"""returns a text string of the prco from the tuple format"""
(name, flag, evr) = prcoTuple
flags = {'GT':'>', 'GE':'>=', 'EQ':'=', 'LT':'<', 'LE':'<='}
if flag is None:
return name
return '%s %s %s' % (name, flags[flag], version_tuple_to_string(evr))
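# Illustrative usage of prco_tuple_to_string() (not part of the original
# module):
#   prco_tuple_to_string(('bash', 'GE', ('0', '4.2', None)))  ->  'bash >= 4.2'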
def string_to_prco_tuple(prcoString):
"""returns a prco tuple (name, flags, (e, v, r)) for a string"""
if type(prcoString) == types.TupleType:
(n, f, v) = prcoString
else:
n = prcoString
f = v = None
# We love GPG keys as packages, esp. awesome provides like:
# gpg(Fedora (13) <[email protected]>)
if n[0] != '/' and not n.startswith("gpg("):
# not a file dep - look at it for being versioned
prco_split = n.split()
if len(prco_split) == 3:
n, f, v = prco_split
# now we have 'n, f, v' where f and v could be None and None
if f is not None and f not in constants.LETTERFLAGS:
if f not in constants.SYMBOLFLAGS:
try:
f = flagToString(int(f))
except (ValueError,TypeError), e:
raise Errors.MiscError, 'Invalid version flag: %s' % f
else:
f = constants.SYMBOLFLAGS[f]
if type(v) in (types.StringType, types.NoneType, types.UnicodeType):
(prco_e, prco_v, prco_r) = stringToVersion(v)
elif type(v) in (types.TupleType, types.ListType):
(prco_e, prco_v, prco_r) = v
#now we have (n, f, (e, v, r)) for the thing specified
return (n, f, (prco_e, prco_v, prco_r))
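# Illustrative usage of string_to_prco_tuple() (not part of the original
# module); the exact epoch in the result comes from stringToVersion()'s
# defaults:
#   string_to_prco_tuple('bash >= 4.2')  ->  ('bash', 'GE', (epoch, '4.2', None))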
def refineSearchPattern(arg):
"""Takes a search string from the cli for Search or Provides
and cleans it up so it doesn't make us vomit"""
if re.search('[*{}?+]|\[.+\]', arg):
restring = fnmatch.translate(arg)
else:
restring = re.escape(arg)
return restring
def _decompress_chunked(source, dest, ztype):
if ztype not in _available_compression:
msg = "%s compression not available" % ztype
raise Errors.MiscError, msg
if ztype == 'bz2':
s_fn = bz2.BZ2File(source, 'r')
elif ztype == 'xz':
s_fn = lzma.LZMAFile(source, 'r')
elif ztype == 'gz':
s_fn = gzip.GzipFile(source, 'r')
destination = open(dest, 'w')
while True:
try:
data = s_fn.read(1024000)
except (OSError, IOError, EOFError), e:
msg = "Error reading from file %s: %s" % (source, str(e))
raise Errors.MiscError, msg
if not data: break
try:
destination.write(data)
except (OSError, IOError), e:
msg = "Error writing to file %s: %s" % (dest, str(e))
raise Errors.MiscError, msg
destination.close()
s_fn.close()
def bunzipFile(source,dest):
""" Extract the bzipped contents of source to dest. """
_decompress_chunked(source, dest, ztype='bz2')
def get_running_kernel_pkgtup(ts):
"""This takes the output of uname and figures out the pkgtup of the running
kernel (name, arch, epoch, version, release)."""
ver = os.uname()[2]
# we glob for the file that MIGHT have this kernel
# and then look up the file in our rpmdb.
fns = sorted(glob.glob('/boot/vmlinuz*%s*' % ver))
for fn in fns:
mi = ts.dbMatch('basenames', fn)
for h in mi:
e = h['epoch']
if h['epoch'] is None:
e = '0'
else:
e = str(e)
return (h['name'], h['arch'], e, h['version'], h['release'])
return (None, None, None, None, None)
def get_running_kernel_version_release(ts):
"""This takes the output of uname and figures out the (version, release)
tuple for the running kernel."""
pkgtup = get_running_kernel_pkgtup(ts)
if pkgtup[0] is not None:
return (pkgtup[3], pkgtup[4])
return (None, None)
def find_unfinished_transactions(yumlibpath='/var/lib/yum'):
"""returns a list of the timestamps from the filenames of the unfinished
transactions remaining in the yumlibpath specified.
"""
timestamps = []
tsallg = '%s/%s' % (yumlibpath, 'transaction-all*')
tsdoneg = '%s/%s' % (yumlibpath, 'transaction-done*')
tsalls = glob.glob(tsallg)
tsdones = glob.glob(tsdoneg)
for fn in tsalls:
if fn.endswith('disabled'):
continue
trans = os.path.basename(fn)
timestamp = trans.replace('transaction-all.','')
timestamps.append(timestamp)
timestamps.sort()
return timestamps
def find_ts_remaining(timestamp, yumlibpath='/var/lib/yum'):
"""this function takes the timestamp of the transaction to look at and
the path to the yum lib dir (defaults to /var/lib/yum)
returns a list of tuples(action, pkgspec) for the unfinished transaction
elements. Returns an empty list if none.
"""
to_complete_items = []
tsallpath = '%s/%s.%s' % (yumlibpath, 'transaction-all', timestamp)
tsdonepath = '%s/%s.%s' % (yumlibpath,'transaction-done', timestamp)
tsdone_items = []
if not os.path.exists(tsallpath):
# something is wrong, here, probably need to raise _something_
return to_complete_items
if os.path.exists(tsdonepath):
tsdone_fo = open(tsdonepath, 'r')
tsdone_items = tsdone_fo.readlines()
tsdone_fo.close()
tsall_fo = open(tsallpath, 'r')
tsall_items = tsall_fo.readlines()
tsall_fo.close()
for item in tsdone_items:
# this probably shouldn't happen but it's worth catching anyway
if item not in tsall_items:
continue
tsall_items.remove(item)
for item in tsall_items:
item = item.replace('\n', '')
if item == '':
continue
try:
(action, pkgspec) = item.split()
except ValueError, e:
msg = "Transaction journal file %s is corrupt." % (tsallpath)
raise Errors.MiscError, msg
to_complete_items.append((action, pkgspec))
return to_complete_items
def seq_max_split(seq, max_entries):
""" Given a seq, split into a list of lists of length max_entries each. """
ret = []
num = len(seq)
seq = list(seq) # Trying to use a set/etc. here is bad
beg = 0
while num > max_entries:
end = beg + max_entries
ret.append(seq[beg:end])
beg += max_entries
num -= max_entries
ret.append(seq[beg:])
return ret
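# Illustrative usage of seq_max_split() (not part of the original module):
#   seq_max_split(range(7), 3)  ->  [[0, 1, 2], [3, 4, 5], [6]]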
_deletechars = ''.join(chr(i) for i in range(32) if i not in (9, 10, 13))
def to_xml(item, attrib=False):
""" Returns xml-friendly utf-8 encoded string.
Accepts utf-8, iso-8859-1, or unicode.
"""
if type(item) is str:
# check if valid utf8
try: unicode(item, 'utf-8')
except UnicodeDecodeError:
# assume iso-8859-1
item = unicode(item, 'iso-8859-1').encode('utf-8')
elif type(item) is unicode:
item = item.encode('utf-8')
elif item is None:
return ''
else:
raise ValueError, 'String expected, got %s' % repr(item)
# compat cruft...
item = item.rstrip()
# kill invalid low bytes
item = item.translate(None, _deletechars)
# quote reserved XML characters
item = item.replace('&', '&amp;')
item = item.replace('<', '&lt;')
item = item.replace('>', '&gt;')
if attrib:
item = item.replace('"', '&quot;')
item = item.replace("'", '&#x27;')
return item
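# Illustrative usage of to_xml() above (not part of the original module):
#   to_xml('a & b <c>')              ->  'a &amp; b &lt;c&gt;'
#   to_xml('say "hi"', attrib=True)  ->  'say &quot;hi&quot;'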