1 # -*- coding: utf-8 -*-
2
3
4 __doc__ = """GNUmed general tools."""
5
6 #===========================================================================
7 __author__ = "K. Hilbert <Karsten.Hilbert@gmx.net>"
8 __license__ = "GPL v2 or later (details at http://www.gnu.org)"
9
10 # std libs
11 import sys
12 import os
13 import os.path
14 import csv
15 import tempfile
16 import logging
17 import hashlib
18 import platform
19 import subprocess
20 import decimal
21 import getpass
22 import io
23 import functools
24 import json
25 import shutil
26 import zipfile
27 import datetime as pydt
28 import re as regex
29 import xml.sax.saxutils as xml_tools
30 # old:
31 import pickle, zlib
32 # docutils
33 du_core = None
34
35
36 # GNUmed libs
37 if __name__ == '__main__':
38 sys.path.insert(0, '../../')
39 from Gnumed.pycommon import gmBorg
40
41
42 _log = logging.getLogger('gm.tools')
43
44 # CAPitalization modes:
45 ( CAPS_NONE, # don't touch it
46 CAPS_FIRST, # CAP first char, leave rest as is
47 CAPS_ALLCAPS, # CAP all chars
48 CAPS_WORDS, # CAP first char of every word
49 CAPS_NAMES, # CAP in a way suitable for names (tries to be smart)
50 CAPS_FIRST_ONLY # CAP first char, lowercase the rest
51 ) = range(6)
52
53
54 u_currency_pound = '\u00A3' # Pound sign
55 u_currency_sign = '\u00A4' # generic currency sign
56 u_currency_yen = '\u00A5' # Yen sign
57 u_right_double_angle_quote = '\u00AB' # <<
58 u_registered_trademark = '\u00AE'
59 u_plus_minus = '\u00B1'
60 u_superscript_one = '\u00B9' # ^1
61 u_left_double_angle_quote = '\u00BB' # >>
62 u_one_quarter = '\u00BC'
63 u_one_half = '\u00BD'
64 u_three_quarters = '\u00BE'
65 u_multiply = '\u00D7' # x
66 u_greek_ALPHA = '\u0391'
67 u_greek_alpha = '\u03b1'
68 u_greek_OMEGA = '\u03A9'
69 u_greek_omega = '\u03c9'
70 u_dagger = '\u2020'
71 u_triangular_bullet = '\u2023' # triangular bullet (>)
72 u_ellipsis = '\u2026' # ...
73 u_euro = '\u20AC' # EURO sign
74 u_numero = '\u2116' # No. / # sign
75 u_down_left_arrow = '\u21B5' # <-'
76 u_left_arrow = '\u2190' # <--
77 u_up_arrow = '\u2191'
78 u_arrow2right = '\u2192' # -->
79 u_down_arrow = '\u2193'
80 u_left_arrow_with_tail = '\u21a2' # <--<
81 u_arrow2right_from_bar = '\u21a6' # |->
82 u_arrow2right_until_vertical_bar = '\u21e5' # -->|
83 u_sum = '\u2211' # sigma
84 u_almost_equal_to = '\u2248' # approximately / nearly / roughly
85 u_corresponds_to = '\u2258'
86 u_infinity = '\u221E'
87 u_arrow2right_until_vertical_bar2 = '\u2b72' # -->|
88
89 u_diameter = '\u2300'
90 u_checkmark_crossed_out = '\u237B'
91 u_box_horiz_high = '\u23ba'
92 u_box_vert_left = '\u23b8'
93 u_box_vert_right = '\u23b9'
94
95 u_space_as_open_box = '\u2423'
96
97 u_box_horiz_single = '\u2500' # -
98 u_box_vert_light = '\u2502'
99 u_box_horiz_light_3dashes = '\u2504' # ...
100 u_box_vert_light_4dashes = '\u2506'
101 u_box_horiz_4dashes = '\u2508' # ....
102 u_box_T_right = '\u251c' # |-
103 u_box_T_left = '\u2524' # -|
104 u_box_T_down = '\u252c'
105 u_box_T_up = '\u2534'
106 u_box_plus = '\u253c'
107 u_box_top_double = '\u2550'
108 u_box_top_left_double_single = '\u2552'
109 u_box_top_right_double_single = '\u2555'
110 u_box_top_left_arc = '\u256d'
111 u_box_top_right_arc = '\u256e'
112 u_box_bottom_right_arc = '\u256f'
113 u_box_bottom_left_arc = '\u2570'
114 u_box_horiz_light_heavy = '\u257c'
115 u_box_horiz_heavy_light = '\u257e'
116
117 u_skull_and_crossbones = '\u2620'
118 u_caduceus = '\u2624'
119 u_frowning_face = '\u2639'
120 u_smiling_face = '\u263a'
121 u_black_heart = '\u2665'
122 u_female = '\u2640'
123 u_male = '\u2642'
124 u_male_female = '\u26a5'
125 u_chain = '\u26d3'
126
127 u_checkmark_thin = '\u2713'
128 u_checkmark_thick = '\u2714'
129 u_heavy_greek_cross = '\u271a'
130 u_arrow2right_thick = '\u2794'
131 u_writing_hand = '\u270d'
132 u_pencil_1 = '\u270e'
133 u_pencil_2 = '\u270f'
134 u_pencil_3 = '\u2710'
135 u_latin_cross = '\u271d'
136
137 u_arrow2right_until_black_diamond = '\u291e' # ->*
138
139 u_kanji_yen = '\u5186' # Yen kanji
140 u_replacement_character = '\ufffd'
141 u_link_symbol = '\U0001F517' # LINK SYMBOL; \u escapes take exactly 4 hex digits, so '\u1f517' decoded as U+1F51 plus a literal '7'
142
143
144 _kB = 1024
145 _MB = 1024 * _kB
146 _GB = 1024 * _MB
147 _TB = 1024 * _GB
148 _PB = 1024 * _TB
149
150
151 _client_version = None
152
153
154 _GM_TITLE_PREFIX = 'GMd'
155
156 #===========================================================================
158
159 print(".========================================================")
160 print("| Unhandled exception caught !")
161 print("| Type :", t)
162 print("| Value:", v)
163 print("`========================================================")
164 _log.critical('unhandled exception caught', exc_info = (t,v,tb))
165 sys.__excepthook__(t,v,tb)
166
167 #===========================================================================
168 # path level operations
169 #---------------------------------------------------------------------------
171 """Create directory.
172
173 - creates parent dirs if necessary
174 - does not fail if directory exists
175 <mode>: numeric, say 0o0700 for "-rwx------"
176 """
177 if os.path.isdir(directory):
178 if mode is None:
179 return True
180
181 changed = False
182 old_umask = os.umask(0)
183 try:
184 # does not WORK !
185 #os.chmod(directory, mode, follow_symlinks = (os.chmod in os.supports_follow_symlinks)) # can't do better
186 os.chmod(directory, mode)
187 changed = True
188 finally:
189 os.umask(old_umask)
190 return changed
191
192 if mode is None:
193 os.makedirs(directory)
194 return True
195
196 old_umask = os.umask(0)
197 try:
198 os.makedirs(directory, mode)
199 finally:
200 os.umask(old_umask)
201 return True
202
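# Usage sketch (illustrative; the path is hypothetical):
#   mkdir('/tmp/gm-example/a/b/c', mode = 0o0700)   # creates parents as needed, returns True
#   mkdir('/tmp/gm-example/a/b/c')                  # already exists -> still returns True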
203 #---------------------------------------------------------------------------
205 assert (directory is not None), '<directory> must not be None'
206
207 README_fname = '.00-README.GNUmed' + coalesce(suffix, '.dir')
208 README_path = os.path.abspath(os.path.expanduser(os.path.join(directory, README_fname)))
209 _log.debug('%s', README_path)
210 if readme is None:
211 _log.debug('no README text, boilerplate only')
212 try:
213 README = open(README_path, mode = 'wt', encoding = 'utf8')
214 except Exception:
215 return False
216
217 line = 'GNUmed v%s -- %s' % (_client_version, pydt.datetime.now().strftime('%c'))
218 len_sep = len(line)
219 README.write(line)
220 README.write('\n')
221 line = README_path
222 len_sep = max(len_sep, len(line))
223 README.write(line)
224 README.write('\n')
225 README.write('-' * len_sep)
226 README.write('\n')
227 README.write('\n')
228 README.write(readme)
229 README.write('\n')
230 README.close()
231 return True
232
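# Usage sketch (illustrative; directory and text are hypothetical):
#   create_directory_description_file (
#       directory = '/tmp/gm-example',
#       readme = 'scratch space for testing',
#       suffix = '.scratch'
#   )
#   # -> writes /tmp/gm-example/.00-README.GNUmed.scratch, returns True on success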
233 #---------------------------------------------------------------------------
235 #-------------------------------
236 def _on_rm_error(func, path, exc):
237 _log.error('error while shutil.rmtree(%s)', path, exc_info=exc)
238 return True
239
240 #-------------------------------
241 error_count = 0
242 try:
243 shutil.rmtree(directory, False, _on_rm_error)
244 except Exception:
245 _log.exception('cannot shutil.rmtree(%s)', directory)
246 error_count += 1
247 return error_count
248
249 #---------------------------------------------------------------------------
251 _log.debug('cleaning out [%s]', directory)
252 try:
253 items = os.listdir(directory)
254 except OSError:
255 return False
256 for item in items:
257 # attempt file/link removal and ignore (but log) errors
258 full_item = os.path.join(directory, item)
259 try:
260 os.remove(full_item)
261 except OSError: # as per the docs, this is a directory
262 _log.debug('[%s] seems to be a subdirectory', full_item)
263 errors = rmdir(full_item)
264 if errors > 0:
265 return False
266 except Exception:
267 _log.exception('cannot os.remove(%s) [a file or a link]', full_item)
268 return False
269
270 return True
271
272 #---------------------------------------------------------------------------
274 if base_dir is None:
275 base_dir = gmPaths().tmp_dir
276 else:
277 if not os.path.isdir(base_dir):
278 mkdir(base_dir, mode = 0o0700) # (invoking user only)
279 if prefix is None:
280 prefix = 'sndbx-'
281 return tempfile.mkdtemp(prefix = prefix, suffix = '', dir = base_dir)
282
283 #---------------------------------------------------------------------------
286
287 #---------------------------------------------------------------------------
289 # /home/user/dir/ -> dir
290 # /home/user/dir -> dir
291 return os.path.basename(os.path.normpath(directory)) # normpath removes trailing slashes if any
292
293 #---------------------------------------------------------------------------
295 try:
296 empty = (len(os.listdir(directory)) == 0)
297 except OSError as exc:
298 if exc.errno != 2: # no such file
299 raise
300 empty = None
301 return empty
302
303 #---------------------------------------------------------------------------
305 """Copy the *content* of <directory> *into* <target_directory>
306 which is created if need be.
307 """
308 assert (directory is not None), 'source <directory> must not be None'
309 assert (target_directory is not None), '<target_directory> must not be None'
310 _log.debug('copying content of [%s] into [%s]', directory, target_directory)
311 try:
312 base_dir_items = os.listdir(directory)
313 except OSError:
314 _log.exception('cannot list dir [%s]', directory)
315 return None
316
317 for item in base_dir_items:
318 full_item = os.path.join(directory, item)
319 if os.path.isdir(full_item):
320 target_subdir = os.path.join(target_directory, item)
321 try:
322 shutil.copytree(full_item, target_subdir)
323 continue
324 except Exception:
325 _log.exception('cannot copy subdir [%s]', full_item)
326 return None
327
328 try:
329 shutil.copy2(full_item, target_directory)
330 except Exception:
331 _log.exception('cannot copy file [%s]', full_item)
332 return None
333
334 return target_directory
335
336 #---------------------------------------------------------------------------
337 #---------------------------------------------------------------------------
339 """This class provides the following paths:
340
341 .home_dir user home
342 .local_base_dir script installation dir
343 .working_dir current dir
344 .user_config_dir
345 .system_config_dir
346 .system_app_data_dir (not writable)
347 .tmp_dir instance-local
348 .user_tmp_dir user-local (NOT per instance)
349 .bytea_cache_dir caches downloaded BYTEA data
350 """
352 """Setup pathes.
353
354 <app_name> will default to (name of the script - .py)
355 """
356 try:
357 self.already_inited
358 return
359 except AttributeError:
360 pass
361
362 self.init_paths(app_name=app_name, wx=wx)
363 self.already_inited = True
364
365 #--------------------------------------
366 # public API
367 #--------------------------------------
369
370 if wx is None:
371 _log.debug('wxPython not available')
372 _log.debug('detecting paths directly')
373
374 if app_name is None:
375 app_name, ext = os.path.splitext(os.path.basename(sys.argv[0]))
376 _log.info('app name detected as [%s]', app_name)
377 else:
378 _log.info('app name passed in as [%s]', app_name)
379
380 # the user home; expanduser() does not work under Wine, so the home_dir property works around that
381 self.__home_dir = None
382
383 # where the main script (the "binary") is installed
384 if getattr(sys, 'frozen', False):
385 _log.info('frozen app, installed into temporary path')
386 # this would find the path of *THIS* file
387 #self.local_base_dir = os.path.dirname(__file__)
388 # while this is documented on the web, the ${_MEIPASS2} does not exist
389 #self.local_base_dir = os.environ.get('_MEIPASS2')
390 # this is what Martin Zibricky <mzibr.public@gmail.com> told us to use
391 # when asking about this on pyinstaller@googlegroups.com
392 #self.local_base_dir = sys._MEIPASS
393 # however, we are --onedir, so we should look at sys.executable
394 # as per the pyinstaller manual
395 self.local_base_dir = os.path.dirname(sys.executable)
396 else:
397 self.local_base_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
398
399 # the current working dir at the OS
400 self.working_dir = os.path.abspath(os.curdir)
401
402 # user-specific config dir, usually below the home dir
403 mkdir(os.path.join(self.home_dir, '.%s' % app_name))
404 self.user_config_dir = os.path.join(self.home_dir, '.%s' % app_name)
405
406 # system-wide config dir, usually below /etc/ under UN*X
407 try:
408 self.system_config_dir = os.path.join('/etc', app_name)
409 except ValueError:
410 #self.system_config_dir = self.local_base_dir
411 self.system_config_dir = self.user_config_dir
412
413 # system-wide application data dir
414 try:
415 self.system_app_data_dir = os.path.join(sys.prefix, 'share', app_name)
416 except ValueError:
417 self.system_app_data_dir = self.local_base_dir
418
419 # temporary directory
420 try:
421 self.__tmp_dir_already_set
422 _log.debug('temp dir already set')
423 except AttributeError:
424 _log.info('temp file prefix: %s', tempfile.gettempprefix())
425 _log.info('initial (user level) temp dir: %s', tempfile.gettempdir())
426 bytes_free = shutil.disk_usage(tempfile.gettempdir()).free
427 _log.info('free disk space for temp dir: %s (%s bytes)', size2str(size = bytes_free), bytes_free)
428 # $TMP/gnumed-$USER/
429 self.user_tmp_dir = os.path.join(tempfile.gettempdir(), '%s-%s' % (app_name, getpass.getuser()))
430 mkdir(self.user_tmp_dir, 0o700)
431 _log.info('intermediate (app+user level) temp dir: %s', self.user_tmp_dir)
432 # $TMP/gnumed-$USER/g-$UNIQUE/
433 tempfile.tempdir = self.user_tmp_dir # tell mkdtemp about intermediate dir
434 self.tmp_dir = tempfile.mkdtemp(prefix = 'g-') # will set tempfile.tempdir as side effect
435 _log.info('final (app instance level) temp dir: %s', tempfile.gettempdir())
436 create_directory_description_file(directory = self.tmp_dir, readme = 'client instance tmp dir')
437
438 # BYTEA cache dir
439 cache_dir = os.path.join(self.user_tmp_dir, '.bytea_cache')
440 try:
441 stat = os.stat(cache_dir)
442 _log.warning('reusing BYTEA cache dir: %s', cache_dir)
443 _log.debug(stat)
444 except FileNotFoundError:
445 mkdir(cache_dir, mode = 0o0700)
446 self.bytea_cache_dir = cache_dir
447 create_directory_description_file(directory = self.bytea_cache_dir, readme = 'cache dir for BYTEA data')
448
449 self.__log_paths()
450 if wx is None:
451 return True
452
453 # retry with wxPython
454 _log.debug('re-detecting paths with wxPython')
455
456 std_paths = wx.StandardPaths.Get()
457 _log.info('wxPython app name is [%s]', wx.GetApp().GetAppName())
458
459 # user-specific config dir, usually below the home dir
460 mkdir(os.path.join(std_paths.GetUserConfigDir(), '.%s' % app_name))
461 self.user_config_dir = os.path.join(std_paths.GetUserConfigDir(), '.%s' % app_name)
462
463 # system-wide config dir, usually below /etc/ under UN*X
464 try:
465 tmp = std_paths.GetConfigDir()
466 if not tmp.endswith(app_name):
467 tmp = os.path.join(tmp, app_name)
468 self.system_config_dir = tmp
469 except ValueError:
470 # leave it at what it was from direct detection
471 pass
472
473 # system-wide application data dir
474 # Robin attests that the following doesn't always
475 # give sane values on Windows, so IFDEF it
476 if 'wxMSW' in wx.PlatformInfo:
477 _log.warning('this platform (wxMSW) sometimes returns a broken value for the system-wide application data dir')
478 else:
479 try:
480 self.system_app_data_dir = std_paths.GetDataDir()
481 except ValueError:
482 pass
483
484 self.__log_paths()
485 return True
486
487 #--------------------------------------
489 _log.debug('sys.argv[0]: %s', sys.argv[0])
490 _log.debug('sys.executable: %s', sys.executable)
491 _log.debug('sys._MEIPASS: %s', getattr(sys, '_MEIPASS', '<not found>'))
492 _log.debug('os.environ["_MEIPASS2"]: %s', os.environ.get('_MEIPASS2', '<not found>'))
493 _log.debug('__file__ : %s', __file__)
494 _log.debug('local application base dir: %s', self.local_base_dir)
495 _log.debug('current working dir: %s', self.working_dir)
496 _log.debug('user home dir: %s', self.home_dir)
497 _log.debug('user-specific config dir: %s', self.user_config_dir)
498 _log.debug('system-wide config dir: %s', self.system_config_dir)
499 _log.debug('system-wide application data dir: %s', self.system_app_data_dir)
500 _log.debug('temporary dir (user): %s', self.user_tmp_dir)
501 _log.debug('temporary dir (instance): %s', self.tmp_dir)
502 _log.debug('temporary dir (tempfile.tempdir): %s', tempfile.tempdir)
503 _log.debug('temporary dir (tempfile.gettempdir()): %s', tempfile.gettempdir())
504 _log.debug('BYTEA cache dir: %s', self.bytea_cache_dir)
505
506 #--------------------------------------
507 # properties
508 #--------------------------------------
510 if not (os.access(path, os.R_OK) and os.access(path, os.X_OK)):
511 msg = '[%s:user_config_dir]: invalid path [%s]' % (self.__class__.__name__, path)
512 _log.error(msg)
513 raise ValueError(msg)
514 self.__user_config_dir = path
515
518
519 user_config_dir = property(_get_user_config_dir, _set_user_config_dir)
520 #--------------------------------------
522 if not (os.access(path, os.R_OK) and os.access(path, os.X_OK)):
523 msg = '[%s:system_config_dir]: invalid path [%s]' % (self.__class__.__name__, path)
524 _log.error(msg)
525 raise ValueError(msg)
526 self.__system_config_dir = path
527
530
531 system_config_dir = property(_get_system_config_dir, _set_system_config_dir)
532 #--------------------------------------
534 if not (os.access(path, os.R_OK) and os.access(path, os.X_OK)):
535 msg = '[%s:system_app_data_dir]: invalid path [%s]' % (self.__class__.__name__, path)
536 _log.error(msg)
537 raise ValueError(msg)
538 self.__system_app_data_dir = path
539
542
543 system_app_data_dir = property(_get_system_app_data_dir, _set_system_app_data_dir)
544 #--------------------------------------
547
549 if self.__home_dir is not None:
550 return self.__home_dir
551
552 tmp = os.path.expanduser('~')
553 if tmp == '~':
554 _log.error('this platform does not expand ~ properly')
555 try:
556 tmp = os.environ['USERPROFILE']
557 except KeyError:
558 _log.error('cannot access $USERPROFILE in environment')
559
560 if not (
561 os.access(tmp, os.R_OK)
562 and
563 os.access(tmp, os.X_OK)
564 and
565 os.access(tmp, os.W_OK)
566 ):
567 msg = '[%s:home_dir]: invalid path [%s]' % (self.__class__.__name__, tmp)
568 _log.error(msg)
569 raise ValueError(msg)
570
571 self.__home_dir = tmp
572 return self.__home_dir
573
574 home_dir = property(_get_home_dir, _set_home_dir)
575
576 #--------------------------------------
578 if not (os.access(path, os.R_OK) and os.access(path, os.X_OK)):
579 msg = '[%s:tmp_dir]: invalid path [%s]' % (self.__class__.__name__, path)
580 _log.error(msg)
581 raise ValueError(msg)
582 _log.debug('previous temp dir: %s', tempfile.gettempdir())
583 self.__tmp_dir = path
584 tempfile.tempdir = self.__tmp_dir
585 _log.debug('new temp dir: %s', tempfile.gettempdir())
586 self.__tmp_dir_already_set = True
587
590
591 tmp_dir = property(_get_tmp_dir, _set_tmp_dir)
592
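# Usage sketch (illustrative): gmPaths is a Borg, so all instances share the detected
# paths; init_paths() can be re-run once a wx App object is available.
#   paths = gmPaths(app_name = 'gnumed')
#   print(paths.user_config_dir)    # e.g. /home/user/.gnumed
#   print(paths.tmp_dir)            # per-instance dir below $TMP/gnumed-$USER/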
593 #===========================================================================
594 # file related tools
595 #---------------------------------------------------------------------------
596 -def recode_file(source_file=None, target_file=None, source_encoding='utf8', target_encoding=None, base_dir=None, error_mode='replace'):
597 if target_encoding is None:
598 return source_file
599
600 if target_encoding == source_encoding:
601 return source_file
602
603 if target_file is None:
604 target_file = get_unique_filename (
605 prefix = '%s-%s_%s-' % (fname_stem(source_file), source_encoding, target_encoding),
606 suffix = fname_extension(source_file, '.txt'),
607 tmp_dir = base_dir
608 )
609 _log.debug('[%s] -> [%s] (%s -> %s)', source_encoding, target_encoding, source_file, target_file)
610 in_file = io.open(source_file, mode = 'rt', encoding = source_encoding)
611 out_file = io.open(target_file, mode = 'wt', encoding = target_encoding, errors = error_mode)
612 for line in in_file:
613 out_file.write(line)
614 out_file.close()
615 in_file.close()
616 return target_file
617
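# Usage sketch (illustrative; file names are hypothetical):
#   utf8_copy = recode_file (
#       source_file = '/tmp/report-latin1.txt',
#       source_encoding = 'latin1',
#       target_encoding = 'utf8'
#   )
#   # -> name of a newly created utf8 copy, or the source file itself if no re-encoding was needed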
618 #---------------------------------------------------------------------------
620 _log.debug('unzipping [%s] -> [%s]', archive_name, target_dir)
621 success = False
622 try:
623 with zipfile.ZipFile(archive_name) as archive:
624 archive.extractall(target_dir)
625 success = True
626 except Exception:
627 _log.exception('cannot unzip')
628 return False
629 if remove_archive:
630 remove_file(archive_name)
631 return success
632
633 #---------------------------------------------------------------------------
635 if not os.path.lexists(filename):
636 return True
637
638 # attempt file removal and ignore (but log) errors
639 try:
640 os.remove(filename)
641 return True
642
643 except Exception:
644 if log_error:
645 _log.exception('cannot os.remove(%s)', filename)
646
647 if force:
648 tmp_name = get_unique_filename(tmp_dir = fname_dir(filename))
649 _log.debug('attempting os.replace(%s -> %s)', filename, tmp_name)
650 try:
651 os.replace(filename, tmp_name)
652 return True
653
654 except Exception:
655 if log_error:
656 _log.exception('cannot os.replace(%s)', filename)
657
658 return False
659
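# Usage sketch (illustrative; file names are hypothetical):
#   remove_file('/tmp/gm-example/old.tmp')                  # True if gone (or never existed)
#   remove_file('/tmp/gm-example/busy.tmp', force = True)   # on failure, additionally try renaming it out of the way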
660 #---------------------------------------------------------------------------
662 blocksize = 2**10 * 128 # 128k, since md5 uses 128 byte blocks
663 _log.debug('md5(%s): <%s> byte blocks', filename, blocksize)
664 f = io.open(filename, mode = 'rb')
665 md5 = hashlib.md5()
666 while True:
667 data = f.read(blocksize)
668 if not data:
669 break
670 md5.update(data)
671 f.close()
672 _log.debug('md5(%s): %s', filename, md5.hexdigest())
673 if return_hex:
674 return md5.hexdigest()
675
676 return md5.digest()
677
678 #---------------------------------------------------------------------------
680 _log.debug('chunked_md5(%s, chunk_size=%s bytes)', filename, chunk_size)
681 md5_concat = ''
682 f = open(filename, 'rb')
683 while True:
684 md5 = hashlib.md5()
685 data = f.read(chunk_size)
686 if not data:
687 break
688 md5.update(data)
689 md5_concat += md5.hexdigest()
690 f.close()
691 md5 = hashlib.md5()
692 md5.update(md5_concat.encode('utf8')) # hashlib .update() requires bytes under Python 3
693 hex_digest = md5.hexdigest()
694 _log.debug('md5("%s"): %s', md5_concat, hex_digest)
695 return hex_digest
696
697 #---------------------------------------------------------------------------
698 default_csv_reader_rest_key = 'list_of_values_of_unknown_fields'
699
701 try:
702 is_dict_reader = kwargs['dict']
703 del kwargs['dict']
704 except KeyError:
705 is_dict_reader = False
706
707 if is_dict_reader:
708 kwargs['restkey'] = default_csv_reader_rest_key
709 return csv.DictReader(unicode_csv_data, dialect=dialect, **kwargs)
710 return csv.reader(unicode_csv_data, dialect=dialect, **kwargs)
711
712
713
714
718
719 #def utf_8_encoder(unicode_csv_data):
720 # for line in unicode_csv_data:
721 # yield line.encode('utf-8')
722
724
725 # csv.py doesn't do Unicode; encode temporarily as UTF-8:
726 try:
727 is_dict_reader = kwargs['dict']
728 del kwargs['dict']
729 if is_dict_reader is not True:
730 raise KeyError
731 kwargs['restkey'] = default_csv_reader_rest_key
732 csv_reader = csv.DictReader(unicode2charset_encoder(unicode_csv_data), dialect=dialect, **kwargs)
733 except KeyError:
734 is_dict_reader = False
735 csv_reader = csv.reader(unicode2charset_encoder(unicode_csv_data), dialect=dialect, **kwargs)
736
737 for row in csv_reader:
738 # decode ENCODING back to Unicode, cell by cell:
739 if is_dict_reader:
740 for key in row:
741 if key == default_csv_reader_rest_key:
742 old_data = row[key]
743 new_data = []
744 for val in old_data:
745 new_data.append(str(val, encoding))
746 row[key] = new_data
747 if default_csv_reader_rest_key not in csv_reader.fieldnames:
748 csv_reader.fieldnames.append(default_csv_reader_rest_key)
749 else:
750 row[key] = str(row[key], encoding)
751 yield row
752 else:
753 yield [ str(cell, encoding) for cell in row ]
754 #yield [str(cell, 'utf-8') for cell in row]
755
756 #---------------------------------------------------------------------------
758 """Normalizes unicode, removes non-alpha characters, converts spaces to underscores."""
759
760 dir_part, name_part = os.path.split(filename)
761 if name_part == '':
762 return filename
763
764 import unicodedata
765 name_part = unicodedata.normalize('NFKD', name_part)
766 # remove everything not in group []
767 name_part = regex.sub (
768 r'[^.\w\s[\]()%§+-]',
769 '',
770 name_part,
771 flags = regex.UNICODE
772 ).strip()
773 # translate whitespace to underscore
774 name_part = regex.sub (
775 r'\s+',
776 '_',
777 name_part,
778 flags = regex.UNICODE
779 )
780 return os.path.join(dir_part, name_part)
781
782 #---------------------------------------------------------------------------
784 """/home/user/dir/filename.ext -> filename"""
785 return os.path.splitext(os.path.basename(filename))[0]
786
787 #---------------------------------------------------------------------------
789 """/home/user/dir/filename.ext -> /home/user/dir/filename"""
790 return os.path.splitext(filename)[0]
791
792 #---------------------------------------------------------------------------
794 """ /home/user/dir/filename.ext -> .ext
795 '' or '.' -> fallback if any else ''
796 """
797 ext = os.path.splitext(filename)[1]
798 if ext.strip() not in ['.', '']:
799 return ext
800 if fallback is None:
801 return ''
802 return fallback
803
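# Usage sketch (illustrative):
#   fname_stem('/home/user/dir/report.pdf')            # -> 'report'
#   fname_extension('/home/user/dir/report.pdf')       # -> '.pdf'
#   fname_extension('/home/user/dir/README', '.txt')   # -> '.txt' (the fallback)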
804 #---------------------------------------------------------------------------
808
809 #---------------------------------------------------------------------------
813
814 #---------------------------------------------------------------------------
816 """This function has a race condition between
817 its file.close()
818 and actually
819 using the filename in callers.
820
821 The file will NOT exist after calling this function.
822 """
823 if tmp_dir is None:
824 gmPaths() # setup tmp dir if necessary
825 else:
826 if (
827 not os.access(tmp_dir, os.F_OK)
828 or
829 not os.access(tmp_dir, os.X_OK | os.W_OK)
830 ):
831 _log.warning('cannot os.access() temporary dir [%s], using system default', tmp_dir)
832 tmp_dir = None
833
834 if include_timestamp:
835 ts = pydt.datetime.now().strftime('%m%d-%H%M%S-')
836 else:
837 ts = ''
838
839 kwargs = {
840 'dir': tmp_dir,
841 # make sure file gets deleted as soon as
842 # .close()d so we can "safely" open it again
843 'delete': True
844 }
845
846 if prefix is None:
847 kwargs['prefix'] = 'gm-%s' % ts
848 else:
849 kwargs['prefix'] = prefix + ts
850
851 if suffix in [None, '']:
852 kwargs['suffix'] = '.tmp'
853 else:
854 if not suffix.startswith('.'):
855 suffix = '.' + suffix
856 kwargs['suffix'] = suffix
857
858 f = tempfile.NamedTemporaryFile(**kwargs)
859 filename = f.name
860 f.close()
861
862 return filename
863
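# Usage sketch (illustrative; <pdf_data> is hypothetical): returns a unique name below
# the GNUmed temp dir -- note the race condition documented above, the file itself
# does NOT exist after the call.
#   fname = get_unique_filename(prefix = 'report-', suffix = '.pdf')
#   with open(fname, 'wb') as f:
#       f.write(pdf_data)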
864 #---------------------------------------------------------------------------
866 import ctypes
867 #windows_create_symlink = ctypes.windll.kernel32.CreateSymbolicLinkW
868 kernel32 = ctypes.WinDLL('kernel32', use_last_error = True)
869 windows_create_symlink = kernel32.CreateSymbolicLinkW
870 windows_create_symlink.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)
871 windows_create_symlink.restype = ctypes.c_ubyte
872 if os.path.isdir(physical_name):
873 flags = 1
874 else:
875 flags = 0
876 ret_code = windows_create_symlink(link_name, physical_name.replace('/', '\\'), flags)
877 _log.debug('ctypes.windll.kernel32.CreateSymbolicLinkW() [%s] exit code: %s', windows_create_symlink, ret_code)
878 if ret_code == 0:
879 raise ctypes.WinError()
880 return ret_code
881
882 #---------------------------------------------------------------------------
884
885 _log.debug('creating symlink (overwrite = %s):', overwrite)
886 _log.debug('link [%s] =>', link_name)
887 _log.debug('=> physical [%s]', physical_name)
888
889 if os.path.exists(link_name):
890 _log.debug('link exists')
891 if overwrite:
892 return True
893 return False
894
895 try:
896 os.symlink(physical_name, link_name)
897 except (AttributeError, NotImplementedError):
898 _log.debug('this Python does not have os.symlink(), trying via ctypes')
899 __make_symlink_on_windows(physical_name, link_name)
900 except PermissionError:
901 _log.exception('cannot create link')
902 return False
903 #except OSError:
904 # unprivileged on Windows
905 return True
906
907 #===========================================================================
908 -def import_module_from_directory(module_path=None, module_name=None, always_remove_path=False):
909 """Import a module from any location."""
910
911 _log.debug('CWD: %s', os.getcwd())
912
913 remove_path = always_remove_path or False
914 if module_path not in sys.path:
915 _log.info('appending to sys.path: [%s]' % module_path)
916 sys.path.append(module_path)
917 remove_path = True
918
919 _log.debug('will remove import path: %s', remove_path)
920
921 if module_name.endswith('.py'):
922 module_name = module_name[:-3]
923
924 try:
925 module = __import__(module_name)
926 except Exception:
927 _log.exception('cannot __import__() module [%s] from [%s]' % (module_name, module_path))
928 while module_path in sys.path:
929 sys.path.remove(module_path)
930 raise
931
932 _log.info('imported module [%s] as [%s]' % (module_name, module))
933 if remove_path:
934 while module_path in sys.path:
935 sys.path.remove(module_path)
936
937 return module
938
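# Usage sketch (illustrative; path and module name are hypothetical):
#   plugin = import_module_from_directory (
#       module_path = '/usr/share/gnumed/plugins',
#       module_name = 'my_plugin.py'        # a trailing '.py' is stripped
#   )
#   # <plugin> is now a regular module object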
939 #===========================================================================
940 # text related tools
941 #---------------------------------------------------------------------------
943 if size == 1:
944 return template % _('1 Byte')
945 if size < 10 * _kB:
946 return template % _('%s Bytes') % size
947 if size < _MB:
948 return template % '%.1f kB' % (float(size) / _kB)
949 if size < _GB:
950 return template % '%.1f MB' % (float(size) / _MB)
951 if size < _TB:
952 return template % '%.1f GB' % (float(size) / _GB)
953 if size < _PB:
954 return template % '%.1f TB' % (float(size) / _TB)
955 return template % '%.1f PB' % (float(size) / _PB)
956
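# Usage sketch (illustrative; assumes the default <template> simply echoes the value):
#   size2str(size = 1)         # -> '1 Byte'
#   size2str(size = 2500)      # -> '2500 Bytes' (sizes below 10 kB stay in Bytes)
#   size2str(size = 2500000)   # -> '2.4 MB'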
957 #---------------------------------------------------------------------------
959 if boolean is None:
960 return none_return
961 if boolean:
962 return true_return
963 if not boolean:
964 return false_return
965 raise ValueError('bool2subst(): <boolean> arg must be either of True, False, None')
966
967 #---------------------------------------------------------------------------
969 return bool2subst (
970 boolean = bool(boolean),
971 true_return = true_str,
972 false_return = false_str
973 )
974
975 #---------------------------------------------------------------------------
977 """Modelled after the SQL NULLIF function."""
978 if value is None:
979 return None
980
981 if strip_string:
982 stripped = value.strip()
983 else:
984 stripped = value
985 if stripped == none_equivalent:
986 return None
987
988 return value
989
990 #---------------------------------------------------------------------------
991 -def coalesce(value2test=None, return_instead=None, template4value=None, template4instead=None, none_equivalents=None, function4value=None, value2return=None):
992 """Modelled after the SQL coalesce function.
993
994 To be used to simplify constructs like:
995
996 if value2test is None (or in none_equivalents):
997 value = (template4instead % return_instead) or return_instead
998 else:
999 value = (template4value % value2test) or value2test
1000 print(value)
1001
1002 @param value2test: the value to be tested for <None>
1003
1004 @param return_instead: the value to be returned if <value2test> *is* None
1005
1006 @param template4value: if <value2test> is returned, replace the value into this template, must contain one <%s>
1007
1008 @param template4instead: if <return_instead> is returned, replace the value into this template, must contain one <%s>
1009
1010 @param value2return: a *value* to return if <value2test> is NOT None, AND there's no <template4value>
1011
1012 example:
1013 function4value = ('strftime', '%Y-%m-%d')
1014
1015 Ideas:
1016 - list of return_insteads: initial, [return_instead, template], [return_instead, template], [return_instead, template], template4value, ...
1017 """
1018 if none_equivalents is None:
1019 none_equivalents = [None]
1020
1021 if value2test in none_equivalents:
1022 if template4instead is None:
1023 return return_instead
1024 return template4instead % return_instead
1025
1026 # at this point, value2test was not equivalent to None
1027
1028 # 1) explicit value to return supplied ?
1029 if value2return is not None:
1030 return value2return
1031
1032 value2return = value2test
1033 # 2) function supplied to be applied to the value ?
1034 if function4value is not None:
1035 funcname, args = function4value
1036 func = getattr(value2test, funcname)
1037 value2return = func(args)
1038
1039 # 3) template supplied to be applied to the value ?
1040 if template4value is None:
1041 return value2return
1042
1043 try:
1044 return template4value % value2return
1045 except TypeError:
1046 # except (TypeError, ValueError):
1047 # this should go, actually, only needed because "old" calls
1048 # to coalesce will still abuse template4value as explicit value2return,
1049 # relying on the replacement to above to fail
1050 if hasattr(_log, 'log_stack_trace'):
1051 _log.log_stack_trace(message = 'deprecated use of <template4value> for <value2return>')
1052 else:
1053 _log.error('deprecated use of <template4value> for <value2return>')
1054 _log.error(locals())
1055 return template4value
1056
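# Usage sketch (illustrative; <dob> would be, say, a datetime instance):
#   coalesce(None, 'no data')                       # -> 'no data'
#   coalesce('fever', 'no data', 'complaint: %s')   # -> 'complaint: fever'
#   coalesce(dob, 'unknown', function4value = ('strftime', '%Y-%m-%d'))
#   # -> dob.strftime('%Y-%m-%d') if <dob> is not None, else 'unknown'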
1057 #---------------------------------------------------------------------------
1059 val = match_obj.group(0).lower()
1060 if val in ['von', 'van', 'de', 'la', 'l', 'der', 'den']: # FIXME: this needs to expand, configurable ?
1061 return val
1062 buf = list(val)
1063 buf[0] = buf[0].upper()
1064 for part in ['mac', 'mc', 'de', 'la']:
1065 if len(val) > len(part) and val[:len(part)] == part:
1066 buf[len(part)] = buf[len(part)].upper()
1067 return ''.join(buf)
1068
1069 #---------------------------------------------------------------------------
1071 """Capitalize the first character but leave the rest alone.
1072
1073 Note that we must be careful about the locale; this may
1074 have issues. However, for unicode strings it should just work.
1075 """
1076 if (mode is None) or (mode == CAPS_NONE):
1077 return text
1078
1079 if len(text) == 0:
1080 return text
1081
1082 if mode == CAPS_FIRST:
1083 if len(text) == 1:
1084 return text[0].upper()
1085 return text[0].upper() + text[1:]
1086
1087 if mode == CAPS_ALLCAPS:
1088 return text.upper()
1089
1090 if mode == CAPS_FIRST_ONLY:
1091 # if len(text) == 1:
1092 # return text[0].upper()
1093 return text[0].upper() + text[1:].lower()
1094
1095 if mode == CAPS_WORDS:
1096 #return regex.sub(ur'(\w)(\w+)', lambda x: x.group(1).upper() + x.group(2).lower(), text)
1097 return regex.sub(r'(\w)(\w+)', lambda x: x.group(1).upper() + x.group(2).lower(), text)
1098
1099 if mode == CAPS_NAMES:
1100 #return regex.sub(r'\w+', __cap_name, text)
1101 return capitalize(text=text, mode=CAPS_FIRST) # until fixed
1102
1103 print("ERROR: invalid capitalization mode: [%s], leaving input as is" % mode)
1104 return text
1105
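# Usage sketch (illustrative):
#   capitalize('heart', mode = CAPS_FIRST)           # -> 'Heart'
#   capitalize('hEART', mode = CAPS_FIRST_ONLY)      # -> 'Heart'
#   capitalize('heart failure', mode = CAPS_WORDS)   # -> 'Heart Failure'
#   capitalize('heart', mode = CAPS_ALLCAPS)         # -> 'HEART'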
1106 #---------------------------------------------------------------------------
1108
1109 if isinstance(initial, decimal.Decimal):
1110 return True, initial
1111
1112 val = initial
1113
1114 # float ? -> to string first
1115 if isinstance(val, float):
1116 val = str(val)
1117
1118 # string ? -> "," to "."
1119 if isinstance(val, str):
1120 val = val.replace(',', '.', 1)
1121 val = val.strip()
1122
1123 try:
1124 d = decimal.Decimal(val)
1125 return True, d
1126 except (TypeError, decimal.InvalidOperation):
1127 return False, val
1128
1129 #---------------------------------------------------------------------------
1131
1132 val = initial
1133
1134 # string ? -> "," to "."
1135 if isinstance(val, str):
1136 val = val.replace(',', '.', 1)
1137 val = val.strip()
1138
1139 try:
1140 int_val = int(val)
1141 except (TypeError, ValueError):
1142 _log.exception('int(%s) failed', val)
1143 return False, initial
1144
1145 if minval is not None:
1146 if int_val < minval:
1147 _log.debug('%s < min (%s)', val, minval)
1148 return False, initial
1149 if maxval is not None:
1150 if int_val > maxval:
1151 _log.debug('%s > max (%s)', val, maxval)
1152 return False, initial
1153
1154 return True, int_val
1155
1156 #---------------------------------------------------------------------------
1158 if remove_whitespace:
1159 text = text.lstrip()
1160 if not text.startswith(prefix):
1161 return text
1162
1163 text = text.replace(prefix, '', 1)
1164 if not remove_repeats:
1165 if remove_whitespace:
1166 return text.lstrip()
1167 return text
1168
1169 return strip_prefix(text, prefix, remove_repeats = True, remove_whitespace = remove_whitespace)
1170
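# Usage sketch (illustrative):
#   strip_prefix('Dx: Dx: asthma', 'Dx:', remove_repeats = True, remove_whitespace = True)
#   # -> 'asthma'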
1171 #---------------------------------------------------------------------------
1173 suffix_len = len(suffix)
1174 if remove_repeats:
1175 if remove_whitespace:
1176 while text.rstrip().endswith(suffix):
1177 text = text.rstrip()[:-suffix_len].rstrip()
1178 return text
1179 while text.endswith(suffix):
1180 text = text[:-suffix_len]
1181 return text
1182 if remove_whitespace:
1183 return text.rstrip()[:-suffix_len].rstrip() if text.rstrip().endswith(suffix) else text.rstrip()
1184 return text[:-suffix_len] if text.endswith(suffix) else text
1185
1186 #---------------------------------------------------------------------------
1188 if lines is None:
1189 lines = text.split(eol)
1190
1191 while True:
1192 if lines[0].strip(eol).strip() != '':
1193 break
1194 lines = lines[1:]
1195
1196 if return_list:
1197 return lines
1198
1199 return eol.join(lines)
1200
1201 #---------------------------------------------------------------------------
1203 if lines is None:
1204 lines = text.split(eol)
1205
1206 while True:
1207 if lines[-1].strip(eol).strip() != '':
1208 break
1209 lines = lines[:-1]
1210
1211 if return_list:
1212 return lines
1213
1214 return eol.join(lines)
1215
1216 #---------------------------------------------------------------------------
1218 return strip_trailing_empty_lines (
1219 lines = strip_leading_empty_lines(lines = lines, text = text, eol = eol, return_list = True),
1220 text = None,
1221 eol = eol,
1222 return_list = return_list
1223 )
1224
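# Usage sketch (illustrative):
#   strip_leading_empty_lines(text = '\n\nline 1\nline 2', return_list = False)   # -> 'line 1\nline 2'
#   strip_trailing_empty_lines(text = 'line 1\n\n', return_list = False)          # -> 'line 1'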
1225 #---------------------------------------------------------------------------
1226 -def list2text(lines, initial_indent='', subsequent_indent='', eol='\n', strip_leading_empty_lines=True, strip_trailing_empty_lines=True, strip_trailing_whitespace=True, max_line_width=None):
1227
1228 if len(lines) == 0:
1229 return ''
1230
1231 if strip_leading_empty_lines: # NB: these boolean args shadow the module-level functions of the same name
1232 lines = globals()['strip_leading_empty_lines'](lines = lines, eol = eol, return_list = True)
1233
1234 if strip_trailing_empty_lines:
1235 lines = globals()['strip_trailing_empty_lines'](lines = lines, eol = eol, return_list = True)
1236
1237 if strip_trailing_whitespace:
1238 lines = [ l.rstrip() for l in lines ]
1239
1240 if max_line_width is not None:
1241 wrapped_lines = []
1242 for l in lines:
1243 wrapped_lines.extend(wrap(l, max_line_width).split('\n'))
1244 lines = wrapped_lines
1245
1246 indented_lines = [initial_indent + lines[0]]
1247 indented_lines.extend([ subsequent_indent + l for l in lines[1:] ])
1248
1249 return eol.join(indented_lines)
1250
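# Usage sketch (illustrative):
#   list2text(['first line', 'second line'], initial_indent = '- ', subsequent_indent = '  ')
#   # -> '- first line\n  second line'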
1251 #---------------------------------------------------------------------------
1253 """A word-wrap function that preserves existing line breaks
1254 and most spaces in the text. Expects that existing line
1255 breaks are posix newlines (\n).
1256 """
1257 if width is None:
1258 return text
1259
1260 wrapped = initial_indent + functools.reduce (
1261 lambda line, word, width=width: '%s%s%s' % (
1262 line,
1263 ' \n'[(len(line) - line.rfind('\n') - 1 + len(word.split('\n',1)[0]) >= width)],
1264 word
1265 ),
1266 text.split(' ')
1267 )
1268 if subsequent_indent != '':
1269 wrapped = ('\n%s' % subsequent_indent).join(wrapped.split('\n'))
1270 if eol != '\n':
1271 wrapped = wrapped.replace('\n', eol)
1272 return wrapped
1273
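# Usage sketch (illustrative): simple greedy word wrapping at <width> characters.
#   wrap('aaa bbb ccc', width = 8)                             # -> 'aaa bbb\nccc'
#   wrap('aaa bbb ccc', width = 8, subsequent_indent = '  ')   # -> 'aaa bbb\n  ccc'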
1274 #---------------------------------------------------------------------------
1275 -def unwrap(text=None, max_length=None, strip_whitespace=True, remove_empty_lines=True, line_separator = ' // '):
1276
1277 text = text.replace('\r', '')
1278 lines = text.split('\n')
1279 text = ''
1280 for line in lines:
1281
1282 if strip_whitespace:
1283 line = line.strip().strip('\t').strip()
1284
1285 if remove_empty_lines:
1286 if line == '':
1287 continue
1288
1289 text += ('%s%s' % (line, line_separator))
1290
1291 text = text[:-len(line_separator)] if text.endswith(line_separator) else text # rstrip() would strip characters, not the whole separator
1292
1293 if max_length is not None:
1294 text = text[:max_length]
1295
1296 text = text.rstrip(line_separator)
1297
1298 return text
1299
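# Usage sketch (illustrative): joins the non-empty lines into a single line.
#   unwrap('line 1\n\n  line 2  \n')   # -> 'line 1 // line 2'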
1300 #---------------------------------------------------------------------------
1302
1303 if len(text) <= max_length:
1304 return text
1305
1306 return text[:max_length-1] + u_ellipsis
1307
1308 #---------------------------------------------------------------------------
1309 -def shorten_words_in_line(text=None, max_length=None, min_word_length=None, ignore_numbers=True, ellipsis=u_ellipsis):
1310 if text is None:
1311 return None
1312 if max_length is None:
1313 max_length = len(text)
1314 else:
1315 if len(text) <= max_length:
1316 return text
1317 old_words = regex.split(r'\s+', text, flags = regex.UNICODE)
1318 no_old_words = len(old_words)
1319 max_word_length = max(min_word_length, (max_length // no_old_words))
1320 words = []
1321 for word in old_words:
1322 if len(word) <= max_word_length:
1323 words.append(word)
1324 continue
1325 if ignore_numbers:
1326 tmp = word.replace('-', '').replace('+', '').replace('.', '').replace(',', '').replace('/', '').replace('&', '').replace('*', '')
1327 if tmp.isdigit():
1328 words.append(word)
1329 continue
1330 words.append(word[:max_word_length] + ellipsis)
1331 return ' '.join(words)
1332
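# Usage sketch (illustrative):
#   shorten_text('a rather long complaint', 10)
#   # -> 'a rather ' + u_ellipsis (9 characters plus the ellipsis)
#   shorten_words_in_line('Pulmonary arterial hypertension', max_length = 20, min_word_length = 4)
#   # -> each over-long word is truncated and gets an ellipsis appended, roughly 'Pulmon… arteri… hypert…'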
1333 #---------------------------------------------------------------------------
1337
1338 #---------------------------------------------------------------------------
1339 -def tex_escape_string(text=None, replace_known_unicode=True, replace_eol=False, keep_visual_eol=False):
1340 """Check for special TeX characters and transform them.
1341
1342 replace_eol:
1343 replaces "\n" with "\\newline"
1344 keep_visual_eol:
1345 replaces "\n" with "\\newline \n" so that
1346 LaTeX knows to place a line break at this
1347 point AND the visual formatting is preserved
1348 in the LaTeX source (think multi-
1349 row table cells)
1350 """
1351 text = text.replace('\\', '\\textbackslash') # requires \usepackage{textcomp} in LaTeX source
1352 text = text.replace('^', '\\textasciicircum')
1353 text = text.replace('~', '\\textasciitilde')
1354
1355 text = text.replace('{', '\\{')
1356 text = text.replace('}', '\\}')
1357 text = text.replace('%', '\\%')
1358 text = text.replace('&', '\\&')
1359 text = text.replace('#', '\\#')
1360 text = text.replace('$', '\\$')
1361 text = text.replace('_', '\\_')
1362 if replace_eol:
1363 if keep_visual_eol:
1364 text = text.replace('\n', '\\newline \n')
1365 else:
1366 text = text.replace('\n', '\\newline ')
1367
1368 if replace_known_unicode:
1369 # this should NOT be replaced for Xe(La)Tex
1370 text = text.replace(u_euro, '\\EUR') # requires \usepackage{textcomp} in LaTeX source
1371 text = text.replace(u_sum, '$\\Sigma$')
1372
1373 return text
1374
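# Usage sketch (illustrative):
#   tex_escape_string('5% of $total & more')   # -> 5\% of \$total \& more
#   # with replace_eol = True every "\n" additionally becomes "\newline"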
1375 #---------------------------------------------------------------------------
1377 global du_core
1378 if du_core is None:
1379 try:
1380 from docutils import core as du_core
1381 except ImportError:
1382 _log.warning('cannot turn ReST into LaTeX: docutils not installed')
1383 return tex_escape_string(text = rst_text)
1384
1385 parts = du_core.publish_parts (
1386 source = rst_text.replace('\\', '\\\\'),
1387 source_path = '<internal>',
1388 writer_name = 'latex',
1389 #destination_path = '/path/to/LaTeX-template/for/calculating/relative/links/template.tex',
1390 settings_overrides = {
1391 'input_encoding': 'unicode' # un-encoded unicode
1392 },
1393 enable_exit_status = True # how to use ?
1394 )
1395 return parts['body']
1396
1397 #---------------------------------------------------------------------------
1399 global du_core
1400 if du_core is None:
1401 try:
1402 from docutils import core as du_core
1403 except ImportError:
1404 _log.warning('cannot turn ReST into HTML: docutils not installed')
1405 return html_escape_string(text = rst_text, replace_eol=False, keep_visual_eol=False)
1406
1407 parts = du_core.publish_parts (
1408 source = rst_text.replace('\\', '\\\\'),
1409 source_path = '<internal>',
1410 writer_name = 'html',
1411 #destination_path = '/path/to/LaTeX-template/for/calculating/relative/links/template.tex',
1412 settings_overrides = {
1413 'input_encoding': 'unicode' # un-encoded unicode
1414 },
1415 enable_exit_status = True # how to use ?
1416 )
1417 return parts['body']
1418
1419 #---------------------------------------------------------------------------
1421 # a web search did not reveal anything else for Xe(La)Tex
1422 # as opposed to LaTeX, except true unicode chars
1423 return tex_escape_string(text = text, replace_known_unicode = False)
1424
1425 #---------------------------------------------------------------------------
1426 __html_escape_table = {
1427 "&": "&",
1428 '"': """,
1429 "'": "'",
1430 ">": ">",
1431 "<": "<",
1432 }
1433
1435 text = ''.join(__html_escape_table.get(char, char) for char in text)
1436 if replace_eol:
1437 if keep_visual_eol:
1438 text = text.replace('\n', '<br>\n')
1439 else:
1440 text = text.replace('\n', '<br>')
1441 return text
1442
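# Usage sketch (illustrative):
#   html_escape_string('BP "120/80" <sys/dia> & pulse', replace_eol = False, keep_visual_eol = False)
#   # -> 'BP &quot;120/80&quot; &lt;sys/dia&gt; &amp; pulse'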
1443 #---------------------------------------------------------------------------
1446
1447 #---------------------------------------------------------------------------
1449 if isinstance(obj, pydt.datetime):
1450 return obj.isoformat()
1451 raise TypeError('cannot json_serialize(%s)' % type(obj))
1452
1453 #---------------------------------------------------------------------------
1454 #---------------------------------------------------------------------------
1456 _log.info('comparing dict-likes: %s[%s] vs %s[%s]', coalesce(title1, '', '"%s" '), type(d1), coalesce(title2, '', '"%s" '), type(d2))
1457 try:
1458 d1 = dict(d1)
1459 except TypeError:
1460 pass
1461 try:
1462 d2 = dict(d2)
1463 except TypeError:
1464 pass
1465 keys_d1 = list(d1)
1466 keys_d2 = list(d2)
1467 different = False
1468 if len(keys_d1) != len(keys_d2):
1469 _log.info('different number of keys: %s vs %s', len(keys_d1), len(keys_d2))
1470 different = True
1471 for key in keys_d1:
1472 if key in keys_d2:
1473 if type(d1[key]) != type(d2[key]):
1474 _log.info('%25.25s: type(dict1) = %s = >>>%s<<<' % (key, type(d1[key]), d1[key]))
1475 _log.info('%25.25s type(dict2) = %s = >>>%s<<<' % ('', type(d2[key]), d2[key]))
1476 different = True
1477 continue
1478 if d1[key] == d2[key]:
1479 _log.info('%25.25s: both = >>>%s<<<' % (key, d1[key]))
1480 else:
1481 _log.info('%25.25s: dict1 = >>>%s<<<' % (key, d1[key]))
1482 _log.info('%25.25s dict2 = >>>%s<<<' % ('', d2[key]))
1483 different = True
1484 else:
1485 _log.info('%25.25s: %50.50s | <MISSING>' % (key, '>>>%s<<<' % d1[key]))
1486 different = True
1487 for key in keys_d2:
1488 if key in keys_d1:
1489 continue
1490 _log.info('%25.25s: %50.50s | %.50s' % (key, '<MISSING>', '>>>%s<<<' % d2[key]))
1491 different = True
1492 if different:
1493 _log.info('dict-likes appear to be different from each other')
1494 return False
1495 _log.info('dict-likes appear equal to each other')
1496 return True
1497
1498 #---------------------------------------------------------------------------
1499 -def format_dict_likes_comparison(d1, d2, title_left=None, title_right=None, left_margin=0, key_delim=' || ', data_delim=' | ', missing_string='=/=', difference_indicator='! ', ignore_diff_in_keys=None):
1500
1501 _log.info('comparing dict-likes: %s[%s] vs %s[%s]', coalesce(title_left, '', '"%s" '), type(d1), coalesce(title_right, '', '"%s" '), type(d2))
1502 append_type = False
1503 if None not in [title_left, title_right]:
1504 append_type = True
1505 type_left = type(d1)
1506 type_right = type(d2)
1507 if title_left is None:
1508 title_left = '%s' % type_left
1509 if title_right is None:
1510 title_right = '%s' % type_right
1511
1512 try: d1 = dict(d1)
1513 except TypeError: pass
1514 try: d2 = dict(d2)
1515 except TypeError: pass
1516 keys_d1 = list(d1)
1517 keys_d2 = list(d2)
1518 data = {}
1519 for key in keys_d1:
1520 data[key] = [d1[key], ' ']
1521 if key in d2:
1522 data[key][1] = d2[key]
1523 for key in keys_d2:
1524 if key in keys_d1:
1525 continue
1526 data[key] = [' ', d2[key]]
1527 max1 = max([ len('%s' % k) for k in keys_d1 ])
1528 max2 = max([ len('%s' % k) for k in keys_d2 ])
1529 max_len = max(max1, max2, len(_('<type>')))
1530 max_key_len_str = '%' + '%s.%s' % (max_len, max_len) + 's'
1531 max1 = max([ len('%s' % d1[k]) for k in keys_d1 ])
1532 max2 = max([ len('%s' % d2[k]) for k in keys_d2 ])
1533 max_data_len = min(max(max1, max2), 100)
1534 max_data_len_str = '%' + '%s.%s' % (max_data_len, max_data_len) + 's'
1535 diff_indicator_len_str = '%' + '%s.%s' % (len(difference_indicator), len(difference_indicator)) + 's'
1536 line_template = (' ' * left_margin) + diff_indicator_len_str + max_key_len_str + key_delim + max_data_len_str + data_delim + '%s'
1537
1538 lines = []
1539 # debugging:
1540 #lines.append(u' (40 regular spaces)')
1541 #lines.append((u' ' * 40) + u"(u' ' * 40)")
1542 #lines.append((u'%40.40s' % u'') + u"(u'%40.40s' % u'')")
1543 #lines.append((u'%40.40s' % u' ') + u"(u'%40.40s' % u' ')")
1544 #lines.append((u'%40.40s' % u'.') + u"(u'%40.40s' % u'.')")
1545 #lines.append(line_template)
1546 lines.append(line_template % ('', '', title_left, title_right))
1547 if append_type:
1548 lines.append(line_template % ('', _('<type>'), type_left, type_right))
1549
1550 if ignore_diff_in_keys is None:
1551 ignore_diff_in_keys = []
1552
1553 for key in keys_d1:
1554 append_type = False
1555 txt_left_col = '%s' % d1[key]
1556 try:
1557 txt_right_col = '%s' % d2[key]
1558 if type(d1[key]) != type(d2[key]):
1559 append_type = True
1560 except KeyError:
1561 txt_right_col = missing_string
1562 lines.append(line_template % (
1563 bool2subst (
1564 ((txt_left_col == txt_right_col) or (key in ignore_diff_in_keys)),
1565 '',
1566 difference_indicator
1567 ),
1568 key,
1569 shorten_text(txt_left_col, max_data_len),
1570 shorten_text(txt_right_col, max_data_len)
1571 ))
1572 if append_type:
1573 lines.append(line_template % (
1574 '',
1575 _('<type>'),
1576 shorten_text('%s' % type(d1[key]), max_data_len),
1577 shorten_text('%s' % type(d2[key]), max_data_len)
1578 ))
1579
1580 for key in keys_d2:
1581 if key in keys_d1:
1582 continue
1583 lines.append(line_template % (
1584 bool2subst((key in ignore_diff_in_keys), '', difference_indicator),
1585 key,
1586 shorten_text(missing_string, max_data_len),
1587 shorten_text('%s' % d2[key], max_data_len)
1588 ))
1589
1590 return lines
1591
1592 #---------------------------------------------------------------------------
1593 -def format_dict_like(d, relevant_keys=None, template=None, missing_key_template='<[%(key)s] MISSING>', left_margin=0, tabular=False, value_delimiters=('>>>', '<<<'), eol='\n', values2ignore=None):
1594 if values2ignore is None:
1595 values2ignore = []
1596 if template is not None:
1597 # all keys in template better exist in d
1598 try:
1599 return template % d
1600 except KeyError:
1601 # or else
1602 _log.exception('template contains %%()s key(s) which do not exist in data dict')
1603 # try to extend dict <d> to contain all required keys,
1604 # for that to work <relevant_keys> better list all
1605 # keys used in <template>
1606 if relevant_keys is not None:
1607 for key in relevant_keys:
1608 try:
1609 d[key]
1610 except KeyError:
1611 d[key] = missing_key_template % {'key': key}
1612 return template % d
1613
1614 if relevant_keys is None:
1615 relevant_keys = list(d)
1616 lines = []
1617 if value_delimiters is None:
1618 delim_left = ''
1619 delim_right = ''
1620 else:
1621 delim_left, delim_right = value_delimiters
1622 if tabular:
1623 max_len = max([ len('%s' % k) for k in relevant_keys ])
1624 max_len_str = '%s.%s' % (max_len, max_len)
1625 line_template = (' ' * left_margin) + '%' + max_len_str + ('s: %s%%s%s' % (delim_left, delim_right))
1626 else:
1627 line_template = (' ' * left_margin) + '%%s: %s%%s%s' % (delim_left, delim_right)
1628 for key in relevant_keys:
1629 try:
1630 val = d[key]
1631 except KeyError:
1632 continue
1633 if val not in values2ignore:
1634 lines.append(line_template % (key, val))
1635 if eol is None:
1636 return lines
1637 return eol.join(lines)
1638
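# Usage sketch (illustrative):
#   format_dict_like({'dob': '1970-01-01', 'gender': 'f'}, tabular = True, left_margin = 2)
#   # -> one right-aligned "key: >>>value<<<" line per key, joined by <eol>, e.g.:
#   #       dob: >>>1970-01-01<<<
#   #    gender: >>>f<<<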
1639 #---------------------------------------------------------------------------
1640 -def dicts2table(dict_list, left_margin=0, eol='\n', keys2ignore=None, column_labels=None, show_only_changes=False, equality_value='<=>', date_format=None): #, relevant_keys=None, template=None
1641 """Each dict in <dict_list> becomes a column.
1642
1643 - each key of dict becomes a row label, unless in keys2ignore
1644
1645 - each entry in the <column_labels> list becomes a column title
1646 """
1647 keys2show = []
1648 col_max_width = {}
1649 max_width_of_row_label_col = 0
1650 col_label_key = '__________#header#__________'
1651 if keys2ignore is None:
1652 keys2ignore = []
1653 if column_labels is not None:
1654 keys2ignore.append(col_label_key)
1655
1656 # extract keys from all dicts and calculate column sizes
1657 for dict_idx in range(len(dict_list)):
1658 # convert potentially dict-*like* into dict
1659 d = dict(dict_list[dict_idx])
1660 # add max-len column label row from <column_labels> list, if available
1661 if column_labels is not None:
1662 d[col_label_key] = max(column_labels[dict_idx].split('\n'), key = len)
1663 field_lengths = []
1664 # loop over all keys in this dict
1665 for key in d:
1666 # ignore this key
1667 if key in keys2ignore:
1668 continue
1669 # remember length of value when displayed
1670 if isinstance(d[key], pydt.datetime):
1671 if date_format is None:
1672 field_lengths.append(len('%s' % d[key]))
1673 else:
1674 field_lengths.append(len(d[key].strftime(date_format)))
1675 else:
1676 field_lengths.append(len('%s' % d[key]))
1677 if key in keys2show:
1678 continue
1679 keys2show.append(key)
1680 max_width_of_row_label_col = max(max_width_of_row_label_col, len('%s' % key))
1681 col_max_width[dict_idx] = max(field_lengths)
1682
1683 # pivot data into dict of lists per line
1684 lines = { k: [] for k in keys2show }
1685 prev_vals = {}
1686 for dict_idx in range(len(dict_list)):
1687 max_width_this_col = max(col_max_width[dict_idx], len(equality_value)) if show_only_changes else col_max_width[dict_idx]
1688 max_len_str = '%s.%s' % (max_width_this_col, max_width_this_col)
1689 field_template = ' %' + max_len_str + 's'
1690 d = dict_list[dict_idx]
1691 for key in keys2show:
1692 try:
1693 val = d[key]
1694 except KeyError:
1695 lines[key].append(field_template % _('<missing>'))
1696 continue
1697 if isinstance(val, pydt.datetime):
1698 if date_format is not None:
1699 val = val.strftime(date_format)
1700 lines[key].append(field_template % val)
1701 if show_only_changes:
1702 if key not in prev_vals:
1703 prev_vals[key] = '%s' % lines[key][-1]
1704 continue
1705 if lines[key][-1] != prev_vals[key]:
1706 prev_vals[key] = '%s' % lines[key][-1]
1707 continue
1708 lines[key][-1] = field_template % equality_value
1709
1710 # format data into table
1711 table_lines = []
1712 max_len_str = '%s.%s' % (max_width_of_row_label_col, max_width_of_row_label_col)
1713 row_label_template = '%' + max_len_str + 's'
1714 for key in lines:
1715 # row label (= key) into first column
1716 line = (' ' * left_margin) + row_label_template % key + '|'
1717 # append list values as subsequent columns
1718 line += '|'.join(lines[key])
1719 table_lines.append(line)
1720
1721 # insert lines with column labels (column headers) if any
1722 if column_labels is not None:
1723 # first column contains row labels, so no column label needed
1724 table_header_line_w_col_labels = (' ' * left_margin) + row_label_template % ''
1725 # second table header line: horizontal separator
1726 table_header_line_w_separator = (' ' * left_margin) + u_box_horiz_single * (max_width_of_row_label_col)
1727 max_col_label_widths = [ max(col_max_width[dict_idx], len(equality_value)) for dict_idx in range(len(dict_list)) ]
1728 for col_idx in range(len(column_labels)):
1729 max_len_str = '%s.%s' % (max_col_label_widths[col_idx], max_col_label_widths[col_idx])
1730 col_label_template = '%' + max_len_str + 's'
1731 table_header_line_w_col_labels += '| '
1732 table_header_line_w_col_labels += col_label_template % column_labels[col_idx]
1733 table_header_line_w_separator += '%s%s' % (u_box_plus, u_box_horiz_single)
1734 table_header_line_w_separator += u_box_horiz_single * max_col_label_widths[col_idx]
1735 table_lines.insert(0, table_header_line_w_separator)
1736 table_lines.insert(0, table_header_line_w_col_labels)
1737
1738 if eol is None:
1739 return table_lines
1740
1741 return ('|' + eol).join(table_lines) + '|' + eol
1742
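# usage sketch (not part of the original source): the formatter above is what the test code
# at the end of this file calls as dicts2table(). Keys become rows, each input dict becomes
# one column; with show_only_changes=True a value repeating the previous column is collapsed
# into the equality marker. A minimal call, relying on the default keyword values otherwise:
def __example_dicts2table_usage():
    rows = [
        {'pkey': 1, 'value': 'a'},
        {'pkey': 2, 'value': 'a'}    # 'value' repeats -> collapsed when show_only_changes=True
    ]
    return dicts2table(rows, left_margin = 1, eol = '\n', show_only_changes = True, column_labels = ['rev 1', 'rev 2'])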
1743 #---------------------------------------------------------------------------
1745 for key in required_keys:
1746 try:
1747 d[key]
1748 except KeyError:
1749 if missing_key_template is None:
1750 d[key] = None
1751 else:
1752 d[key] = missing_key_template % {'key': key}
1753 return d
1754
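# usage sketch (not part of the original source): the def line of the helper above is not
# shown in this rendering; it ensures each key in <required_keys> exists in <d>, storing
# None or the expanded <missing_key_template> for absent keys. The template expansion works
# as demonstrated here:
def __example_missing_key_template_expansion():
    missing_key_template = '[%(key)s is missing]'
    # for a required key 'dob' absent from <d> the helper above would store:
    return missing_key_template % {'key': 'dob'}    # -> '[dob is missing]'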
1755 #---------------------------------------------------------------------------
1756 def enumerate_removable_partitions():
1757 try:
1758 import pyudev
1759 import psutil
1760 except ImportError:
1761 _log.error('pyudev and/or psutil not installed')
1762 return {}
1763
1764 removable_partitions = {}
1765 ctxt = pyudev.Context()
1766 removable_devices = [ dev for dev in ctxt.list_devices(subsystem='block', DEVTYPE='disk') if dev.attributes.get('removable') == b'1' ]
1767 all_mounted_partitions = { part.device: part for part in psutil.disk_partitions() }
1768 for device in removable_devices:
1769 _log.debug('removable device: %s', device.properties['ID_MODEL'])
1770 partitions_on_removable_device = {
1771 part.device_node: {
1772 'type': device.properties['ID_TYPE'],
1773 'bus': device.properties['ID_BUS'],
1774 'device': device.properties['DEVNAME'],
1775 'partition': part.properties['DEVNAME'],
1776 'vendor': part.properties['ID_VENDOR'],
1777 'model': part.properties['ID_MODEL'],
1778 'fs_label': part.properties['ID_FS_LABEL'],
1779 'is_mounted': False,
1780 'mountpoint': None,
1781 'fs_type': None,
1782 'size_in_bytes': -1,
1783 'bytes_free': 0
1784 } for part in ctxt.list_devices(subsystem='block', DEVTYPE='partition', parent=device)
1785 }
1786 for part in partitions_on_removable_device:
1787 try:
1788 partitions_on_removable_device[part]['mountpoint'] = all_mounted_partitions[part].mountpoint
1789 partitions_on_removable_device[part]['is_mounted'] = True
1790 partitions_on_removable_device[part]['fs_type'] = all_mounted_partitions[part].fstype
1791 du = shutil.disk_usage(all_mounted_partitions[part].mountpoint)
1792 partitions_on_removable_device[part]['size_in_bytes'] = du.total
1793 partitions_on_removable_device[part]['bytes_free'] = du.free
1794 except KeyError:
1795 pass # not mounted
1796 removable_partitions.update(partitions_on_removable_device)
1797 return removable_partitions
1798
1799 # debugging:
1800 #ctxt = pyudev.Context()
1801 #for dev in ctxt.list_devices(subsystem='block', DEVTYPE='disk'):# if dev.attributes.get('removable') == b'1':
1802 # for a in dev.attributes.available_attributes:
1803 # print(a, dev.attributes.get(a))
1804 # for key, value in dev.items():
1805 # print('{key}={value}'.format(key=key, value=value))
1806 # print('---------------------------')
1807
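# usage sketch (not part of the original source): one way to consume the mapping returned by
# enumerate_removable_partitions() -- keep only partitions which are currently mounted and
# report their free space.
def __example_mounted_removable_partitions():
    mounted = {}
    for node, meta in enumerate_removable_partitions().items():
        if meta['is_mounted']:
            mounted[node] = meta
            print('%s mounted at %s: %s bytes free' % (node, meta['mountpoint'], meta['bytes_free']))
    return mounted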
1808 #---------------------------------------------------------------------------
1809 def enumerate_optical_writers():
1810 try:
1811 import pyudev
1812 except ImportError:
1813 _log.error('pyudev not installed')
1814 return []
1815
1816 optical_writers = []
1817 ctxt = pyudev.Context()
1818 for dev in [ dev for dev in ctxt.list_devices(subsystem='block', DEVTYPE='disk') if dev.properties.get('ID_CDROM_CD_RW', None) == '1' ]:
1819 optical_writers.append ({
1820 'type': dev.properties['ID_TYPE'],
1821 'bus': dev.properties['ID_BUS'],
1822 'device': dev.properties['DEVNAME'],
1823 'model': dev.properties['ID_MODEL']
1824 })
1825 return optical_writers
1826
1827 #---------------------------------------------------------------------------
1828 #---------------------------------------------------------------------------
1829 def prompted_input(prompt=None, default=None):	# def line absent from this rendering; signature assumed from the docstring and body below
1830 """Obtains entry from standard input.
1831
1832 prompt: prompt text to display on standard output
1833 default: value returned when the user just presses Enter
1834 CTRL-C: aborts and returns None
1835 """
1836 if prompt is None:
1837 msg = '(CTRL-C aborts)'
1838 else:
1839 msg = '%s (CTRL-C aborts)' % prompt
1840
1841 if default is None:
1842 msg = msg + ': '
1843 else:
1844 msg = '%s [%s]: ' % (msg, default)
1845
1846 try:
1847 usr_input = input(msg)
1848 except KeyboardInterrupt:
1849 return None
1850
1851 if usr_input == '':
1852 return default
1853
1854 return usr_input
1855
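# usage sketch (not part of the original source; the def line was reconstructed above as
# prompted_input()): offer a default which is returned on plain Enter, and treat None as
# "user aborted with CTRL-C".
def __example_prompted_input():
    port = prompted_input(prompt = 'database port', default = '5432')
    if port is None:
        print('aborted by user')
    return port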
1856 #===========================================================================
1857 # image handling tools
1858 #---------------------------------------------------------------------------
1859 # builtin (ugly but tried and true) fallback icon
1860 __icon_serpent = \
1861 """x\xdae\x8f\xb1\x0e\x83 \x10\x86w\x9f\xe2\x92\x1blb\xf2\x07\x96\xeaH:0\xd6\
1862 \xc1\x85\xd5\x98N5\xa5\xef?\xf5N\xd0\x8a\xdcA\xc2\xf7qw\x84\xdb\xfa\xb5\xcd\
1863 \xd4\xda;\xc9\x1a\xc8\xb6\xcd<\xb5\xa0\x85\x1e\xeb\xbc\xbc7b!\xf6\xdeHl\x1c\
1864 \x94\x073\xec<*\xf7\xbe\xf7\x99\x9d\xb21~\xe7.\xf5\x1f\x1c\xd3\xbdVlL\xc2\
1865 \xcf\xf8ye\xd0\x00\x90\x0etH \x84\x80B\xaa\x8a\x88\x85\xc4(U\x9d$\xfeR;\xc5J\
1866 \xa6\x01\xbbt9\xceR\xc8\x81e_$\x98\xb9\x9c\xa9\x8d,y\xa9t\xc8\xcf\x152\xe0x\
1867 \xe9$\xf5\x07\x95\x0cD\x95t:\xb1\x92\xae\x9cI\xa8~\x84\x1f\xe0\xa3ec"""
1868
1869 def get_icon(wx=None):	# def line absent from this rendering; name and signature assumed from the body below
1870
1871 paths = gmPaths(app_name = 'gnumed', wx = wx)
1872
1873 candidates = [
1874 os.path.join(paths.system_app_data_dir, 'bitmaps', 'gm_icon-serpent_and_gnu.png'),
1875 os.path.join(paths.local_base_dir, 'bitmaps', 'gm_icon-serpent_and_gnu.png'),
1876 os.path.join(paths.system_app_data_dir, 'bitmaps', 'serpent.png'),
1877 os.path.join(paths.local_base_dir, 'bitmaps', 'serpent.png')
1878 ]
1879
1880 found_as = None
1881 for candidate in candidates:
1882 try:
1883 open(candidate, 'r').close()
1884 found_as = candidate
1885 break
1886 except IOError:
1887 _log.debug('icon not found in [%s]', candidate)
1888
1889 if found_as is None:
1890 _log.warning('no icon file found, falling back to builtin (ugly) icon')
1891 icon_bmp_data = wx.BitmapFromXPMData(pickle.loads(zlib.decompress(__icon_serpent)))
1892 icon.CopyFromBitmap(icon_bmp_data)
1893 else:
1894 _log.debug('icon found in [%s]', found_as)
1895 icon = wx.Icon()
1896 try:
1897 icon.LoadFile(found_as, wx.BITMAP_TYPE_ANY) #_PNG
1898 except AttributeError:
1899 _log.exception("this platform doesn't support wx.Icon().LoadFile()")
1900
1901 return icon
1902
1903 #---------------------------------------------------------------------------
1904 def create_qrcode(text=None, filename=None, qr_filename=None, verbose=False):	# def line absent from this rendering; signature reconstructed from argument usage below
1905 assert (not ((text is None) and (filename is None))), 'either <text> or <filename> must be specified'
1906
1907 try:
1908 import pyqrcode
1909 except ImportError:
1910 _log.exception('cannot import <pyqrcode>')
1911 return None
1912 if text is None:
1913 with io.open(filename, mode = 'rt', encoding = 'utf8') as input_file:
1914 text = input_file.read()
1915 if qr_filename is None:
1916 if filename is None:
1917 qr_filename = get_unique_filename(prefix = 'gm-qr-', suffix = '.png')
1918 else:
1919 qr_filename = get_unique_filename (
1920 prefix = fname_stem(filename) + '-',
1921 suffix = fname_extension(filename) + '.png'
1922 )
1923 _log.debug('[%s] -> [%s]', filename, qr_filename)
1924 qr = pyqrcode.create(text, encoding = 'utf8')
1925 if verbose:
1926 print('input file:', filename)
1927 print('output file:', qr_filename)
1928 print('text to encode:', text)
1929 print(qr.terminal())
1930 qr.png(qr_filename, quiet_zone = 1)
1931 return qr_filename
1932
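# usage sketch (not part of the original source; the def line was reconstructed above as
# create_qrcode()): encode a short text into a QR code PNG; the output name is generated
# automatically when no qr_filename is given.
def __example_create_qrcode():
    png_name = create_qrcode(text = 'GNUmed QR test payload')
    print('QR code written to:', png_name)
    return png_name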
1933 #===========================================================================
1934 # main
1935 #---------------------------------------------------------------------------
1936 if __name__ == '__main__':
1937
1938 if len(sys.argv) < 2:
1939 sys.exit()
1940
1941 if sys.argv[1] != 'test':
1942 sys.exit()
1943
1944 # for testing:
1945 logging.basicConfig(level = logging.DEBUG)
1946 from Gnumed.pycommon import gmI18N
1947 gmI18N.activate_locale()
1948 gmI18N.install_domain()
1949
1950 #-----------------------------------------------------------------------
1951 def test_input2decimal():
1952
1953 tests = [
1954 [None, False],
1955
1956 ['', False],
1957 [' 0 ', True, 0],
1958
1959 [0, True, 0],
1960 [0.0, True, 0],
1961 [.0, True, 0],
1962 ['0', True, 0],
1963 ['0.0', True, 0],
1964 ['0,0', True, 0],
1965 ['00.0', True, 0],
1966 ['.0', True, 0],
1967 [',0', True, 0],
1968
1969 [0.1, True, decimal.Decimal('0.1')],
1970 [.01, True, decimal.Decimal('0.01')],
1971 ['0.1', True, decimal.Decimal('0.1')],
1972 ['0,1', True, decimal.Decimal('0.1')],
1973 ['00.1', True, decimal.Decimal('0.1')],
1974 ['.1', True, decimal.Decimal('0.1')],
1975 [',1', True, decimal.Decimal('0.1')],
1976
1977 [1, True, 1],
1978 [1.0, True, 1],
1979 ['1', True, 1],
1980 ['1.', True, 1],
1981 ['1,', True, 1],
1982 ['1.0', True, 1],
1983 ['1,0', True, 1],
1984 ['01.0', True, 1],
1985 ['01,0', True, 1],
1986 [' 01, ', True, 1],
1987
1988 [decimal.Decimal('1.1'), True, decimal.Decimal('1.1')]
1989 ]
1990 for test in tests:
1991 conversion_worked, result = input2decimal(initial = test[0])
1992
1993 expected2work = test[1]
1994
1995 if conversion_worked:
1996 if expected2work:
1997 if result == test[2]:
1998 continue
1999 else:
2000 print("ERROR (conversion result wrong): >%s<, expected >%s<, got >%s<" % (test[0], test[2], result))
2001 else:
2002 print("ERROR (conversion worked but was expected to fail): >%s<, got >%s<" % (test[0], result))
2003 else:
2004 if not expected2work:
2005 continue
2006 else:
2007 print("ERROR (conversion failed but was expected to work): >%s<, expected >%s<" % (test[0], test[2]))
2008 #-----------------------------------------------------------------------
2013 #-----------------------------------------------------------------------
2014 def test_coalesce():
2015
2016 val = None
2017 print(val, coalesce(val, 'is None', 'is not None'))
2018 val = 1
2019 print(val, coalesce(val, 'is None', 'is not None'))
2020 return
2021
2022 import datetime as dt
2023 print(coalesce(value2test = dt.datetime.now(), template4value = '-- %s --', function4value = ('strftime', '%Y-%m-%d')))
2024
2025 print('testing coalesce()')
2026 print("------------------")
2027 tests = [
2028 [None, 'something other than <None>', None, None, 'something other than <None>'],
2029 ['Captain', 'Mr.', '%s.'[:4], 'Mr.', 'Capt.'],
2030 ['value to test', 'test 3 failed', 'template with "%s" included', None, 'template with "value to test" included'],
2031 ['value to test', 'test 4 failed', 'template with value not included', None, 'template with value not included'],
2032 [None, 'initial value was None', 'template4value: %s', None, 'initial value was None'],
2033 [None, 'initial value was None', 'template4value: %%(abc)s', None, 'initial value was None']
2034 ]
2035 passed = True
2036 for test in tests:
2037 result = coalesce (
2038 value2test = test[0],
2039 return_instead = test[1],
2040 template4value = test[2],
2041 template4instead = test[3]
2042 )
2043 if result != test[4]:
2044 print("ERROR")
2045 print("coalesce: (%s, %s, %s, %s)" % (test[0], test[1], test[2], test[3]))
2046 print("expected:", test[4])
2047 print("received:", result)
2048 passed = False
2049
2050 if passed:
2051 print("passed")
2052 else:
2053 print("failed")
2054 return passed
2055 #-----------------------------------------------------------------------
2056 def test_capitalize():
2057 print('testing capitalize() ...')
2058 success = True
2059 pairs = [
2060 # [original, expected result, CAPS mode]
2061 ['Boot', 'Boot', CAPS_FIRST_ONLY],
2062 ['boot', 'Boot', CAPS_FIRST_ONLY],
2063 ['booT', 'Boot', CAPS_FIRST_ONLY],
2064 ['BoOt', 'Boot', CAPS_FIRST_ONLY],
2065 ['boots-Schau', 'Boots-Schau', CAPS_WORDS],
2066 ['boots-sChau', 'Boots-Schau', CAPS_WORDS],
2067 ['boot camp', 'Boot Camp', CAPS_WORDS],
2068 ['fahrner-Kampe', 'Fahrner-Kampe', CAPS_NAMES],
2069 ['häkkönen', 'Häkkönen', CAPS_NAMES],
2070 ['McBurney', 'McBurney', CAPS_NAMES],
2071 ['mcBurney', 'McBurney', CAPS_NAMES],
2072 ['blumberg', 'Blumberg', CAPS_NAMES],
2073 ['roVsing', 'RoVsing', CAPS_NAMES],
2074 ['Özdemir', 'Özdemir', CAPS_NAMES],
2075 ['özdemir', 'Özdemir', CAPS_NAMES],
2076 ]
2077 for pair in pairs:
2078 result = capitalize(pair[0], pair[2])
2079 if result != pair[1]:
2080 success = False
2081 print('ERROR (caps mode %s): "%s" -> "%s", expected "%s"' % (pair[2], pair[0], result, pair[1]))
2082
2083 if success:
2084 print("... SUCCESS")
2085
2086 return success
2087 #-----------------------------------------------------------------------
2088 def test_import_module():
2089 print("testing import_module_from_directory()")
2090 path = sys.argv[1]
2091 name = sys.argv[2]
2092 try:
2093 mod = import_module_from_directory(module_path = path, module_name = name)
2094 except Exception:
2095 print("module import failed, see log")
2096 return False
2097
2098 print("module import succeeded", mod)
2099 print(dir(mod))
2100 return True
2101 #-----------------------------------------------------------------------
2105 #-----------------------------------------------------------------------
2106 def test_gmPaths():
2107 print("testing gmPaths()")
2108 print("-----------------")
2109 paths = gmPaths(wx=None, app_name='gnumed')
2110 print("user config dir:", paths.user_config_dir)
2111 print("system config dir:", paths.system_config_dir)
2112 print("local base dir:", paths.local_base_dir)
2113 print("system app data dir:", paths.system_app_data_dir)
2114 print("working directory :", paths.working_dir)
2115 print("temp directory :", paths.tmp_dir)
2116 #-----------------------------------------------------------------------
2117 def test_none_if():
2118 print("testing none_if()")
2119 print("-----------------")
2120 tests = [
2121 [None, None, None],
2122 ['a', 'a', None],
2123 ['a', 'b', 'a'],
2124 ['a', None, 'a'],
2125 [None, 'a', None],
2126 [1, 1, None],
2127 [1, 2, 1],
2128 [1, None, 1],
2129 [None, 1, None]
2130 ]
2131
2132 for test in tests:
2133 if none_if(value = test[0], none_equivalent = test[1]) != test[2]:
2134 print('ERROR: none_if(%s) returned [%s], expected [%s]' % (test[0], none_if(test[0], test[1]), test[2]))
2135
2136 return True
2137 #-----------------------------------------------------------------------
2138 def test_bool2str():
2139 tests = [
2140 [True, 'Yes', 'Yes', 'Yes'],
2141 [False, 'OK', 'not OK', 'not OK']
2142 ]
2143 for test in tests:
2144 if bool2str(test[0], test[1], test[2]) != test[3]:
2145 print('ERROR: bool2str(%s, %s, %s) returned [%s], expected [%s]' % (test[0], test[1], test[2], bool2str(test[0], test[1], test[2]), test[3]))
2146
2147 return True
2148 #-----------------------------------------------------------------------
2149 def test_bool2subst():
2150
2151 print(bool2subst(True, 'True', 'False', 'is None'))
2152 print(bool2subst(False, 'True', 'False', 'is None'))
2153 print(bool2subst(None, 'True', 'False', 'is None'))
2154 #-----------------------------------------------------------------------
2155 def test_get_unique_filename():
2156 print(get_unique_filename())
2157 print(get_unique_filename(prefix='test-'))
2158 print(get_unique_filename(suffix='tst'))
2159 print(get_unique_filename(prefix='test-', suffix='tst'))
2160 print(get_unique_filename(tmp_dir='/home/ncq/Archiv/'))
2161 #-----------------------------------------------------------------------
2162 def test_size2str():
2163 print("testing size2str()")
2164 print("------------------")
2165 tests = [0, 1, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000, 10000000000, 100000000000, 1000000000000, 10000000000000]
2166 for test in tests:
2167 print(size2str(test))
2168 #-----------------------------------------------------------------------
2169 def test_unwrap():
2170
2171 test = """
2172 second line\n
2173 3rd starts with tab \n
2174 4th with a space \n
2175
2176 6th
2177
2178 """
2179 print(unwrap(text = test, max_length = 25))
2180 #-----------------------------------------------------------------------
2181 def test_wrap():
2182 test = 'line 1\nline 2\nline 3'
2183
2184 print("wrap 5-6-7 initial 0, subsequent 0")
2185 print(wrap(test, 5))
2186 print()
2187 print(wrap(test, 6))
2188 print()
2189 print(wrap(test, 7))
2190 print("-------")
2191 input()
2192 print("wrap 5 initial 1-1-3, subsequent 1-3-1")
2193 print(wrap(test, 5, ' ', ' '))
2194 print()
2195 print(wrap(test, 5, ' ', ' '))
2196 print()
2197 print(wrap(test, 5, ' ', ' '))
2198 print("-------")
2199 input()
2200 print("wrap 6 initial 1-1-3, subsequent 1-3-1")
2201 print(wrap(test, 6, ' ', ' '))
2202 print()
2203 print(wrap(test, 6, ' ', ' '))
2204 print()
2205 print(wrap(test, 6, ' ', ' '))
2206 print("-------")
2207 input()
2208 print("wrap 7 initial 1-1-3, subsequent 1-3-1")
2209 print(wrap(test, 7, ' ', ' '))
2210 print()
2211 print(wrap(test, 7, ' ', ' '))
2212 print()
2213 print(wrap(test, 7, ' ', ' '))
2214 #-----------------------------------------------------------------------
2215 def test_md5():
2216 print('md5 %s: %s' % (sys.argv[2], file2md5(sys.argv[2])))
2217 print('chunked md5 %s: %s' % (sys.argv[2], file2chunked_md5(sys.argv[2])))
2218 #-----------------------------------------------------------------------
2219 def test_unicode():
2220 print(u_link_symbol * 10)
2221 #-----------------------------------------------------------------------
2222 def test_xml_escape():
2223 print(xml_escape_string('<'))
2224 print(xml_escape_string('>'))
2225 print(xml_escape_string('&'))
2226 #-----------------------------------------------------------------------
2227 def test_tex_escape():
2228 tests = ['\\', '^', '~', '{', '}', '%', '&', '#', '$', '_', u_euro, 'abc\ndef\n\n1234']
2229 tests.append(' '.join(tests))
2230 for test in tests:
2231 print('%s:' % test, tex_escape_string(test))
2232
2233 #-----------------------------------------------------------------------
2234 def test_rst2latex_snippet():
2235 tests = ['\\', '^', '~', '{', '}', '%', '&', '#', '$', '_', u_euro, 'abc\ndef\n\n1234']
2236 tests.append(' '.join(tests))
2237 tests.append('C:\Windows\Programme\System 32\lala.txt')
2238 tests.extend([
2239 'should be identical',
2240 'text *some text* text',
2241 """A List
2242 ======
2243
2244 1. 1
2245 2. 2
2246
2247 3. ist-list
2248 1. more
2249 2. noch was ü
2250 #. nummer x"""
2251 ])
2252 for test in tests:
2253 print('==================================================')
2254 print('raw:')
2255 print(test)
2256 print('---------')
2257 print('ReST 2 LaTeX:')
2258 latex = rst2latex_snippet(test)
2259 print(latex)
2260 if latex.strip() == test.strip():
2261 print('=> identical')
2262 print('---------')
2263 print('tex_escape_string:')
2264 print(tex_escape_string(test))
2265 input()
2266
2267 #-----------------------------------------------------------------------
2268 def test_strip_trailing_empty_lines():
2269 tests = [
2270 'one line, no embedded line breaks ',
2271 'one line\nwith embedded\nline\nbreaks\n '
2272 ]
2273 for test in tests:
2274 print('as list:')
2275 print(strip_trailing_empty_lines(text = test, eol='\n', return_list = True))
2276 print('as string:')
2277 print('>>>%s<<<' % strip_trailing_empty_lines(text = test, eol='\n', return_list = False))
2278 tests = [
2279 ['list', 'without', 'empty', 'trailing', 'lines'],
2280 ['list', 'with', 'empty', 'trailing', 'lines', '', ' ', '']
2281 ]
2282 for test in tests:
2283 print('as list:')
2284 print(strip_trailing_empty_lines(lines = test, eol = '\n', return_list = True))
2285 print('as string:')
2286 print(strip_trailing_empty_lines(lines = test, eol = '\n', return_list = False))
2287 #-----------------------------------------------------------------------
2288 def test_fname_stem():
2289 tests = [
2290 r'abc.exe',
2291 r'\abc.exe',
2292 r'c:\abc.exe',
2293 r'c:\d\abc.exe',
2294 r'/home/ncq/tmp.txt',
2295 r'~/tmp.txt',
2296 r'./tmp.txt',
2297 r'./.././tmp.txt',
2298 r'tmp.txt'
2299 ]
2300 for t in tests:
2301 print("[%s] -> [%s]" % (t, fname_stem(t)))
2302 #-----------------------------------------------------------------------
2303 def test_dir_is_empty():
2304 print(sys.argv[2], 'empty:', dir_is_empty(sys.argv[2]))
2305
2306 #-----------------------------------------------------------------------
2307 def test_compare_dicts():
2308 d1 = {}
2309 d2 = {}
2310 d1[1] = 1
2311 d1[2] = 2
2312 d1[3] = 3
2313 # 4
2314 d1[5] = 5
2315
2316 d2[1] = 1
2317 d2[2] = None
2318 # 3
2319 d2[4] = 4
2320
2321 #compare_dict_likes(d1, d2)
2322
2323 d1 = {1: 1, 2: 2}
2324 d2 = {1: 1, 2: 2}
2325
2326 #compare_dict_likes(d1, d2, 'same1', 'same2')
2327 print(format_dict_like(d1, tabular = False))
2328 print(format_dict_like(d1, tabular = True))
2329 #print(format_dict_like(d2))
2330
2331 #-----------------------------------------------------------------------
2332 def test_format_compare_dicts():
2333 d1 = {}
2334 d2 = {}
2335 d1[1] = 1
2336 d1[2] = 2
2337 d1[3] = 3
2338 # 4
2339 d1[5] = 5
2340
2341 d2[1] = 1
2342 d2[2] = None
2343 # 3
2344 d2[4] = 4
2345
2346 print('\n'.join(format_dict_likes_comparison(d1, d2, 'd1', 'd2')))
2347
2348 d1 = {1: 1, 2: 2}
2349 d2 = {1: 1, 2: 2}
2350
2351 print('\n'.join(format_dict_likes_comparison(d1, d2, 'd1', 'd2')))
2352
2353 #-----------------------------------------------------------------------
2354 def test_rm_dir():
2355 rmdir('cx:\windows\system3__2xxxxxxxxxxxxx')
2356
2357 #-----------------------------------------------------------------------
2358 def test_rm_dir_content():
2359 #print(rm_dir_content('cx:\windows\system3__2xxxxxxxxxxxxx'))
2360 print(rm_dir_content('/tmp/user/1000/tmp'))
2361
2362 #-----------------------------------------------------------------------
2363 def test_strip_prefix():
2364 tests = [
2365 ('', '', ''),
2366 ('a', 'a', ''),
2367 ('GMd: a window title', _GM_TITLE_PREFIX + ':', 'a window title'),
2368 ('\.br\MICROCYTES+1\.br\SPHEROCYTES present\.br\POLYCHROMASIAmoderate\.br\\', '\.br\\', 'MICROCYTES+1\.br\SPHEROCYTES present\.br\POLYCHROMASIAmoderate\.br\\')
2369 ]
2370 for test in tests:
2371 text, prefix, expect = test
2372 result = strip_prefix(text, prefix, remove_whitespace = True)
2373 if result == expect:
2374 continue
2375 print('test failed:', test)
2376 print('result:', result)
2377
2378 #-----------------------------------------------------------------------
2379 def test_shorten_text():
2380 tst = [
2381 ('123', 1),
2382 ('123', 2),
2383 ('123', 3),
2384 ('123', 4),
2385 ('', 1),
2386 ('1', 1),
2387 ('12', 1),
2388 ('', 2),
2389 ('1', 2),
2390 ('12', 2),
2391 ('123', 2)
2392 ]
2393 for txt, lng in tst:
2394 print('max', lng, 'of', txt, '=', shorten_text(txt, lng))
2395 #-----------------------------------------------------------------------
2396 def test_fname_sanitize():
2397 tests = [
2398 '/tmp/test.txt',
2399 '/tmp/ test.txt',
2400 '/tmp/ tes\\t.txt',
2401 'test'
2402 ]
2403 for test in tests:
2404 print (test, fname_sanitize(test))
2405
2406 #-----------------------------------------------------------------------
2409
2410 #-----------------------------------------------------------------------
2411 def test_enumerate_removable_partitions():
2412 parts = enumerate_removable_partitions()
2413 for part_name in parts:
2414 part = parts[part_name]
2415 print(part['device'])
2416 print(part['partition'])
2417 if part['is_mounted']:
2418 print('%s@%s: %s on %s by %s @ %s (FS=%s: %s free of %s total)' % (
2419 part['type'],
2420 part['bus'],
2421 part['fs_label'],
2422 part['model'],
2423 part['vendor'],
2424 part['mountpoint'],
2425 part['fs_type'],
2426 part['bytes_free'],
2427 part['size_in_bytes']
2428 ))
2429 else:
2430 print('%s@%s: %s on %s by %s (not mounted)' % (
2431 part['type'],
2432 part['bus'],
2433 part['fs_label'],
2434 part['model'],
2435 part['vendor']
2436 ))
2437
2438 #-----------------------------------------------------------------------
2439 def test_enumerate_optical_writers():
2440 for writer in enumerate_optical_writers():
2441 print('%s@%s: %s @ %s' % (
2442 writer['type'],
2443 writer['bus'],
2444 writer['model'],
2445 writer['device']
2446 ))
2447
2448 #-----------------------------------------------------------------------
2452
2453 #-----------------------------------------------------------------------
2454 def test_mk_sandbox_dir():
2455 print(mk_sandbox_dir(base_dir = '/tmp/abcd/efg/h'))
2456
2457 #-----------------------------------------------------------------------
2458 def test_make_table_from_dicts():
2459 dicts = [
2460 {'pkey': 1, 'value': 'a122'},
2461 {'pkey': 2, 'value': 'b23'},
2462 {'pkey': 3, 'value': 'c3'},
2463 {'pkey': 4, 'value': 'd4ssssssssssss'},
2464 {'pkey': 5, 'value': 'd4 asdfas '},
2465 {'pkey': 5, 'value': 'c5---'},
2466 ]
2467 with open('x.txt', 'w', encoding = 'utf8') as f:
2468 f.write(dicts2table(dicts, left_margin=2, eol='\n', keys2ignore=None, show_only_changes=True, column_labels = ['d1', 'd2', 'd3', 'd4', 'd5', 'd6']))
2469 #print(dicts2table(dicts, left_margin=2, eol='\n', keys2ignore=None, show_only_changes=True, column_labels = ['d1', 'd2', 'd3', 'd4', 'd5', 'd6']))
2470
2471 #-----------------------------------------------------------------------
2472 def test_create_dir_desc_file():
2473 global _client_version
2474 _client_version = 'dev.test'
2475 print(create_directory_description_file (
2476 directory = './',
2477 readme = 'test\ntest2\nsome more text',
2478 suffix = None
2479 ))
2480
2481 #-----------------------------------------------------------------------
2482 #test_coalesce()
2483 #test_capitalize()
2484 #test_import_module()
2485 #test_mkdir()
2486 #test_gmPaths()
2487 #test_none_if()
2488 #test_bool2str()
2489 #test_bool2subst()
2490 #test_get_unique_filename()
2491 #test_size2str()
2492 #test_wrap()
2493 #test_input2decimal()
2494 #test_input2int()
2495 #test_unwrap()
2496 #test_md5()
2497 #test_unicode()
2498 #test_xml_escape()
2499 #test_strip_trailing_empty_lines()
2500 #test_fname_stem()
2501 #test_tex_escape()
2502 #test_rst2latex_snippet()
2503 #test_dir_is_empty()
2504 #test_compare_dicts()
2505 #test_rm_dir()
2506 #test_rm_dir_content()
2507 #test_strip_prefix()
2508 #test_shorten_text()
2509 #test_format_compare_dicts()
2510 #test_fname_sanitize()
2511 #test_create_qrcode()
2512 #test_enumerate_removable_partitions()
2513 #test_enumerate_optical_writers()
2514 #test_copy_tree_content()
2515 #test_mk_sandbox_dir()
2516 #test_make_table_from_dicts()
2517 test_create_dir_desc_file()
2518
2519 #===========================================================================
2520