# mng_json.py (forked from glibsonoran/Plush-for-ComfyUI)
import json
import os
import shutil
from enum import Enum
import bisect
from datetime import datetime, timedelta
import re
import math
import time
from pathlib import Path
from typing import Optional, Any, Union, List, Dict


class TroubleSgltn:
    """
    A Singleton class that acts as a central hub for log messages logged using json_manager.log_events().
    This class formats and stores these event messages until the reset() method is called, clearing the data and
    optionally creating a process header describing the method or class that's the origin of the logs that follow.
    Nodes that use this class should initialize it with TroubleSgltn().reset('my_process') at the top of the main
    method at the start of the run. If you want a more granular listing of the processes being logged, you can
    append a new process header using set_process_header(). The node's main method can then query the
    get_troubles() method at the end of the run to fetch all stored log messages and present them to the user
    in the return tuple: return (result, trbl.get_troubles()).
    """
    _instance = None

    class Severity(Enum):
        INFO = 1
        WARNING = 2
        ERROR = 3

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            # Initialize shared state on first instantiation
            cls._troubles = ""  # Accumulated, formatted trouble messages
            cls._section_bullet = "\n\u27a4"
            cls._bullet = "\u2726"
            cls._new_line = "\n"
            cls._header_stack = []
        return cls._instance

    def set_process_header(self, process_head: str = "New Process") -> None:
        self._troubles += f'{self._section_bullet} Begin Log for: {process_head}:{self._new_line}'
        self._header_stack.append(process_head)

    def pop_header(self) -> bool:
        is_popped = False
        if self._header_stack:
            self._header_stack.pop()
            if self._header_stack:
                # Restore the previous header so subsequent logs are attributed to it
                process_head = self._header_stack[-1]
                self._troubles += f'{self._section_bullet} Begin Log for: {process_head}:{self._new_line}'
                is_popped = True
        return is_popped

    def log_trouble(self, message: str, severity: Severity) -> None:
        """
        Logs a trouble message with a specified severity and formats it for display.

        Args:
            message (str): The trouble message to log.
            severity (Severity): The severity level of the message.
        """
        trouble_message = f"{self._bullet} {severity.name}: {message}{self._new_line}"
        self._troubles += trouble_message

    def reset(self, process_head: str = '') -> None:
        """
        Resets the stored trouble messages.
        Sets a log name header if a value is passed.
        """
        self._troubles = ""
        self._header_stack = []
        if process_head:
            self.set_process_header(process_head)

    def get_troubles(self) -> str:
        """
        Returns the stored trouble messages.

        Returns:
            str: The accumulated trouble messages.
        """
        return self._troubles if self._troubles else "No Troubles"
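

# A minimal usage sketch of the workflow described in the TroubleSgltn docstring. This demo
# helper and its process/message strings are illustrative only; they are not part of the
# original module and are never called at import time.
def _demo_trouble_log() -> str:
    """Show the intended TroubleSgltn workflow for a node's main method."""
    trbl = TroubleSgltn()
    trbl.reset('MyNode.main')  # Clear the prior run and start a process header
    trbl.log_trouble("style file not found, using defaults", TroubleSgltn.Severity.WARNING)
    return trbl.get_troubles()  # Formatted block a node can return to the user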


class helpSgltn:
    # Singleton class that contains help text for various nodes
    _instance = None

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance.get_file()
        return cls._instance

    def get_file(self):
        # Open help.json
        j_mmgr = json_manager()
        help_file = j_mmgr.append_filename_to_path(j_mmgr.script_dir, 'help.json')
        help_data = j_mmgr.load_json(help_file, False)
        self._style_prompt_help = ""
        self._dalle_help = ''
        self._exif_wrangler_help = ''
        self._adv_prompt_help = ''
        self._tagger_help = ""
        # Empty help text is not a critical issue for the app
        if not help_data:
            j_mmgr.log_events('Help data file is empty or missing.',
                              TroubleSgltn.Severity.ERROR)
            return
        # Get help text
        self._style_prompt_help = help_data.get('sp_help', '')
        self._exif_wrangler_help = help_data.get('wrangler_help', '')
        self._dalle_help = help_data.get('dalle_help', '')
        self._adv_prompt_help = help_data.get('adv_prompt_help', '')
        self._tagger_help = help_data.get('tagger_help', '')

    @property
    def style_prompt_help(self) -> str:
        return self._style_prompt_help

    @property
    def exif_wrangler_help(self) -> str:
        return self._exif_wrangler_help

    @property
    def dalle_help(self) -> str:
        return self._dalle_help

    @property
    def adv_prompt_help(self) -> str:
        return self._adv_prompt_help

    @property
    def tagger_help(self) -> str:
        return self._tagger_help
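

# Usage sketch (illustrative helper, not part of the original module): helpSgltn loads
# help.json once; later instantiations reuse the same instance and its cached text.
def _demo_help_text() -> str:
    """Fetch the style-prompt help text (empty string if help.json is absent)."""
    return helpSgltn().style_prompt_help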


class json_manager:

    def __init__(self):
        self.trbl = TroubleSgltn()

        # Public properties
        # Get the directory where the script is located
        self.script_dir = os.path.dirname(os.path.abspath(__file__))
        self.script_parent, self.script_parent_dirname = self.findParent(self.script_dir)
        self.update_file = os.path.join(self.script_dir, 'update.json')
        self.config_file = os.path.join(self.script_dir, 'config.json')
        self.backup_dir = self.find_child_directory(self.script_dir, 'bkup', True, True)
        self.backup_config_path = os.path.join(self.backup_dir, 'config.json')
        self.log_dir = self.find_child_directory(self.script_dir, 'logs', True)
        self.log_file_name = "Plush-Events"
        self.comfy_dir = self.find_target_directory(self.script_dir, 'ComfyUI', True)
        if not self.comfy_dir:
            # Fall back to the parent of the script's parent: ComfyUI, or its
            # hierarchical equivalent in non-standard installations
            self.comfy_dir, nm = self.findParent(self.script_parent, as_string=True)
            self.log_events(f"Non-standard directory structure, ComfyUI directory name: {nm}")

        # Private properties
        self._config_bad = os.path.join(self.script_dir, 'config.bad')
        self._update_bad = os.path.join(self.script_dir, 'update.bad')

    def log_events(self, event: str, severity: TroubleSgltn.Severity = TroubleSgltn.Severity.INFO, is_trouble: bool = False,
                   file_name: Union[str, None] = None, is_critical: bool = False) -> bool:
        """
        Appends events with a prepended timestamp to a text log file.
        Each event is written as a JSON object in key/value pair format.
        Creates the file if it doesn't exist.

        Args:
            event (str): The event information.
            severity (TroubleSgltn.Severity): An Enum indicating the severity of the issue.
            is_trouble (bool): Whether to also log the event in TroubleSgltn to be presented to the user.
            file_name (str): The name of the log file. Defaults to self.log_file_name if None.
            is_critical (bool): If True, file errors raise exceptions.

        Returns:
            bool: True if successful, False otherwise.
        """
        if file_name is None:
            file_name = self.log_file_name

        if is_trouble:
            self.trbl.log_trouble(event, severity)

        date_time = datetime.now()
        timestamp = date_time.strftime("%Y-%m-%d %I:%M:%S %p")  # YYYY-MM-DD, 12-hour AM/PM

        # Create a dict of the log event
        log_event_data = {
            "timestamp": timestamp,
            "severity": severity.name,
            "event": event
        }
        # Convert the dict to a JSON string using json.dumps to handle invalid chars.
        log_event_json = self.convert_to_json_string(log_event_data, is_logger=True)
        if log_event_json is None:
            return False

        log_file_path = self.append_filename_to_path(self.log_dir, f"{file_name}.log", True)

        # Use the append_to_file utility to write the log event to the file
        success = self.append_to_file(log_event_json, log_file_path, is_critical, is_logger=True)
        return success
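
    # Usage sketch (hypothetical messages/file name; not executed): log to the default
    # Plush-Events.log, mirror to TroubleSgltn for the UI, or route to a custom file.
    #   j_mngr = json_manager()
    #   j_mngr.log_events("Template applied", TroubleSgltn.Severity.INFO, is_trouble=True)
    #   j_mngr.log_events("Debug detail", file_name="MyNode-Debug")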

    def append_to_file(self, data: str, file_path: Union[str, Path], is_critical: bool, is_logger: bool = False) -> bool:
        """
        Appends a text string to a file.
        Creates the file if it doesn't exist.

        Args:
            data (str): The text string to append to the file.
            file_path (Union[str, Path]): The path and name of the file to which the string will be appended.
            is_critical (bool): If True, raises exceptions for errors.
            is_logger (bool): If True, suppresses logging of write failures (used when called by the logger itself).

        Returns:
            bool: True if the append action is successful, else False.
        """
        file_path = Path(file_path)
        try:
            with open(file_path, 'a', encoding='utf-8') as file:
                file.write(data + '\n')
            return True
        except (IOError, OSError) as e:
            # Avoid infinite recursion: don't report a logger failure through the logger
            if not is_logger:
                self.log_events(f"Error writing data to file: {file_path}: {e}",
                                TroubleSgltn.Severity.ERROR,
                                True)
            if is_critical:
                raise
            return False

    def findParent(self, child: Union[Path, str], as_string: bool = True) -> Union[tuple, None]:
        """
        Returns the parent directory path and parent directory name using pathlib.

        Args:
            child (Union[Path, str]): Path of the directory whose parent is to be found.
            as_string (bool): Determines if the returned path will be a Path object or a string.

        Returns:
            tuple: A tuple containing the path to the parent directory and the parent directory name,
            or None if no child path was provided.
        """
        if child:
            child_path = Path(child).resolve()
            parent_path = child_path.parent
            parent_name = parent_path.name
        else:
            return None
        return (str(parent_path), parent_name) if as_string else (parent_path, parent_name)

    def find_target_directory(self, start_path: Union[Path, str], target_dir_name: str, as_string: bool = True) -> Union[Path, str, None]:
        """
        Walks up the directory structure from start_path to find a directory named target_dir_name.

        Args:
            start_path (Union[Path, str]): The starting directory path.
            target_dir_name (str): The target directory name to find.
            as_string (bool): Determines if the return value will be a Path object or a string.

        Returns:
            Union[Path, str, None]: A Path object or string of the found directory path, or None if not found.
        """
        current_path = Path(start_path).resolve()
        for parent in current_path.parents:
            if parent.name == target_dir_name:
                return str(parent) if as_string else parent
        return None

    def append_filename_to_path(self, directory_path: Union[Path, str], filename: str, as_string: bool = True) -> Union[Path, str]:
        """
        Appends a filename to the given directory path and returns the result as either a Path object or a string.

        Args:
            directory_path (Union[Path, str]): The path of the directory as a Path object or a string.
            filename (str): The filename to append.
            as_string (bool): If True, returns the path as a string.

        Returns:
            Union[Path, str]: The combined path including the filename, either as a Path object or a string.
        """
        combined_path = Path(directory_path) / filename
        return str(combined_path) if as_string else combined_path
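
    # Usage sketch for the path helpers (hypothetical workflow; not executed):
    #   j_mngr = json_manager()
    #   parent_path, parent_name = j_mngr.findParent(j_mngr.script_dir)
    #   comfy_root = j_mngr.find_target_directory(j_mngr.script_dir, 'ComfyUI')  # walk upward
    #   cfg_path = j_mngr.append_filename_to_path(j_mngr.script_dir, 'config.json')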

    def delete_files_by_age(self, file_path: Union[str, Path], file_pattern: str, max_age_days: int = 10, is_critical: bool = False) -> bool:
        """
        Deletes files in a specified directory that match a given pattern and are older than a specified age.

        Args:
            file_path (Union[str, Path]): Path to the directory where files will be deleted.
            file_pattern (str): The pattern to match files. Can include wildcards, e.g., '*.txt', '*.*', 'myfile.txt'.
            max_age_days (int): Maximum age of files to keep. Files older than this will be deleted.
            is_critical (bool): Whether or not to raise file errors.

        Returns:
            bool: True if all deletions are successful, False if not.
        """
        directory = Path(file_path)
        current_time = time.time()
        all_deletions_successful = True

        # Ensure the provided path is a directory
        if not directory.is_dir():
            self.log_events(f"'delete_files_by_age': The path {directory} is not a directory.",
                            TroubleSgltn.Severity.WARNING,
                            True)
            return False

        # Iterate over files matching the pattern
        for file in directory.glob(file_pattern):
            # Note: st_ctime is creation time on Windows, inode-change time on Unix
            file_age_days = (current_time - file.stat().st_ctime) / (24 * 3600)
            if file_age_days > max_age_days:
                try:
                    file.unlink()
                    self.log_events(f"Deleted file: {file}", is_trouble=True)
                except Exception as e:
                    self.log_events(f"Error deleting file {file}: {e}",
                                    TroubleSgltn.Severity.WARNING,
                                    True)
                    all_deletions_successful = False
                    if is_critical:
                        raise

        return all_deletions_successful

    def remove_log_entries_by_age(self, log_file_path, days_allowed):
        """
        Rewrites a JSON-lines log file, keeping only entries newer than days_allowed.

        Returns:
            int or None: The number of entries deleted, or None if the file could not be read or rewritten.
        """
        timestamp_format = "%Y-%m-%d %I:%M:%S %p"
        cutoff_time = datetime.now() - timedelta(days=days_allowed)
        deleted_count = 0
        updated_entries = []
        try:
            with open(log_file_path, "r", encoding='utf-8') as file:
                for line in file:
                    # Skip empty lines
                    if not line.strip():
                        continue
                    log_entry = json.loads(line)
                    entry_time = datetime.strptime(log_entry["timestamp"], timestamp_format)
                    if entry_time > cutoff_time:
                        updated_entries.append(json.dumps(log_entry))
                    else:
                        deleted_count += 1
        except Exception as e:
            self.log_events(f"Error reading log file: {log_file_path}: {e}",
                            TroubleSgltn.Severity.WARNING,
                            True)
            return None

        # Use write_string_to_file to write the updated entries back
        updated_content = "\n".join(updated_entries) + '\n'
        success = self.write_string_to_file(updated_content, log_file_path)
        if not success:
            self.log_events(f"Failed to write updated log entries back to file: {log_file_path}",
                            TroubleSgltn.Severity.ERROR,
                            True)
            return None
        return deleted_count
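
    # Maintenance sketch combining both cleanup methods (hypothetical retention periods;
    # not executed): delete stale log files, then prune old entries from the active log.
    #   j_mngr = json_manager()
    #   j_mngr.delete_files_by_age(j_mngr.log_dir, '*.log', max_age_days=30)
    #   log_path = j_mngr.append_filename_to_path(j_mngr.log_dir, 'Plush-Events.log')
    #   removed = j_mngr.remove_log_entries_by_age(log_path, days_allowed=10)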

    def generate_unique_filename(self, extension: str, base: str = "") -> str:
        """
        Generates a unique file name by incorporating the current date and time,
        with a base and extension provided by the user.

        Args:
            extension (str): The file extension.
            base (str): The first part of the file name.

        Returns:
            str: A filename consisting of the base, a date-time stamp, and the extension.
        """
        # Get the current date and time
        current_datetime = datetime.now()
        # Format the date and time as YYYYMMDD_HHMMSS
        datetime_str = current_datetime.strftime("%Y%m%d_%H%M%S")
        # Append this string to the base file name
        unique_filename = f"{base}{datetime_str}.{extension}"
        return unique_filename
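
    # Usage sketch (hypothetical base name; not executed):
    #   json_manager().generate_unique_filename('json', 'config_backup_')
    #   # -> e.g. 'config_backup_20240131_142255.json'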

    def find_child_directory(self, parent: Union[str, Path], child: str, create: bool = False, as_string: bool = True) -> Union[Path, str]:
        """
        Finds a child directory within a given parent directory, or optionally creates it if it doesn't exist.

        Args:
            parent (Union[str, Path]): The starting directory path.
            child (str): The target child directory name to find or create.
            create (bool): If True, creates the child directory if it does not exist.
            as_string (bool): If True, returns the path as a string; if False, returns a Path object.

        Returns:
            Union[Path, str]: A Path object or string of the child directory, or an empty string if creation failed.
        """
        parent_path = Path(parent)
        child_path = parent_path / child
        if not child_path.is_dir() and create:
            try:
                child_path.mkdir(parents=True, exist_ok=True)
            except OSError as e:
                self.log_events(f"Error creating directory {child_path}: {e}",
                                TroubleSgltn.Severity.WARNING,
                                True)
                return ""
        return str(child_path) if as_string else child_path
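
    # Usage sketch (not executed): find-or-create a child directory; an empty string
    # signals that creation failed.
    #   j_mngr = json_manager()
    #   bkup_dir = j_mngr.find_child_directory(j_mngr.script_dir, 'bkup', create=True)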

    # Load a file
    def load_json(self, ld_file: Union[str, Path], is_critical: bool = False):
        """
        Loads a JSON file.

        Args:
            ld_file (str): Path to the JSON file to be loaded.
            is_critical (bool): If True, raises exceptions for errors.

        Returns:
            dict or None: The loaded JSON data (dict) if valid, None otherwise.
        """
        try:
            with open(ld_file, 'r', encoding='utf-8') as file:
                return json.load(file)
        except json.JSONDecodeError as e:
            self.log_events(f'JSON syntax error in: {ld_file}: {e}',
                            TroubleSgltn.Severity.WARNING,
                            True)
            # Re-raise inside the handler so the bare raise has an active exception
            if is_critical:
                raise
        except FileNotFoundError:
            self.log_events(f"File not found: {ld_file}",
                            TroubleSgltn.Severity.WARNING,
                            True)
            if is_critical:
                raise
        except Exception as e:
            self.log_events(f"Plush - An unexpected error occurred while reading {ld_file}: {e}",
                            TroubleSgltn.Severity.WARNING,
                            True)
            if is_critical:
                raise
        return None

    def write_json(self, data: dict, file_path: str, is_critical: bool = False):
        """
        Writes a Python dictionary to a JSON file.

        Args:
            data (dict): The data to write to the JSON file.
            file_path (str): The path of the file to be written.
            is_critical (bool): If True, raises exceptions for errors.

        Returns:
            bool: True if the write operation was successful, False otherwise.
        """
        try:
            with open(file_path, 'w', encoding='utf-8') as file:
                json.dump(data, file, indent=4)
            return True
        except TypeError as e:
            self.log_events(f"Plush - Data type not serializable in {file_path}: {e}",
                            TroubleSgltn.Severity.WARNING,
                            True)
            if is_critical:
                raise
        except Exception as e:
            self.log_events(f"Plush - An error occurred while writing to JSON: {file_path}: {e}",
                            TroubleSgltn.Severity.WARNING,
                            True)
            if is_critical:
                raise
        return False
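
    # Round-trip sketch (hypothetical path; not executed): write_json returns False and
    # load_json returns None on failure, so both can be checked without try/except.
    #   j_mngr = json_manager()
    #   if j_mngr.write_json({'style': []}, 'demo.json'):
    #       data = j_mngr.load_json('demo.json')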

    def read_lines_of_file(self, file_path: Union[str, Path], lines: int = 0, comment_char: str = '#', is_critical: bool = False) -> list | None:
        """
        Loads a specified number of lines from a text file, skipping lines that start with a comment character.

        Args:
            file_path (Union[str, Path]): The path to the file from which data will be loaded.
            lines (int): Number of non-comment lines to read from the file. Zero returns all lines.
            comment_char (str): The character used to denote comments. Default is '#'.
            is_critical (bool): If True, raises exceptions for errors.

        Returns:
            list or None: The loaded list of file lines (excluding comments), or None on error.
        """
        file_lines = []
        try:
            with open(file_path, 'r', encoding='utf-8') as file:
                line_num = 0
                for line in file:
                    stripped_line = line.strip()
                    if not stripped_line.startswith(comment_char):
                        file_lines.append(stripped_line)
                        line_num += 1
                        if 0 < lines <= line_num:
                            break
            return file_lines
        except Exception as e:
            self.log_events(f"Error reading from file: {file_path}: {e}",
                            TroubleSgltn.Severity.ERROR,
                            True,
                            is_critical=is_critical)
            if is_critical:
                raise
            return None
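
    # Usage sketch (hypothetical file; not executed): read the first 5 non-comment lines.
    #   lines = json_manager().read_lines_of_file('styles.txt', lines=5, comment_char='#')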

    def copy_template_file(self, template: str, new_file: str, overwrite: bool = False, is_critical: bool = False) -> bool:
        """
        Copies a template file to a new location with a new file name.

        Args:
            template (str): The path and file name of the template file.
            new_file (str): The path and file name to be given to the new file.
            overwrite (bool): Whether to overwrite the file if it already exists (default is False).
            is_critical (bool): If True, exceptions will be raised; if False, exceptions will be handled in the method (default is False).

        Returns:
            bool: True if the file was copied or already exists without needing overwrite, False if the copy fails.
        """
        # Convert paths to Path objects
        template_path = Path(template)
        new_file_path = Path(new_file)
        try:
            # Check if the new file already exists
            if new_file_path.exists() and not overwrite:
                self.log_events(f"File '{new_file}' already exists and overwrite is set to False.",
                                TroubleSgltn.Severity.INFO)
                return True  # File exists, no overwrite, so we consider it a success

            # Check if the template file exists
            if not template_path.exists():
                raise FileNotFoundError(f"Template file '{template}' not found.")

            # Create parent directories for the new file if they don't exist
            new_file_path.parent.mkdir(parents=True, exist_ok=True)

            # Copy the file
            try:
                shutil.copy(template_path, new_file_path)
            except Exception as e:
                error_message = f"Failed to copy template '{template}' to '{new_file}': {e}"
                self.log_events(error_message, TroubleSgltn.Severity.ERROR)
                if is_critical:
                    raise Exception(error_message) from e
                return False

            self.log_events(f"Template file '{template}' successfully copied to '{new_file}'.",
                            TroubleSgltn.Severity.INFO)
            return True  # Success
        except Exception as e:
            if is_critical:
                # Re-raise the original exception if the operation is critical
                raise
            # Handle the exception without raising
            self.log_events(f"An error occurred: {e}", TroubleSgltn.Severity.ERROR)
            return False  # Failure
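
    # Usage sketch (hypothetical template name; not executed): seed config.json from a
    # template without clobbering an existing file.
    #   j_mngr = json_manager()
    #   ok = j_mngr.copy_template_file('cfgtemplate.json', j_mngr.config_file, overwrite=False)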

    def read_file_contents(self, file_path, is_critical=False):
        """
        Reads and returns the full text contents of a file.

        Args:
            file_path: The path of the file to read.
            is_critical (bool): If True, raises exceptions for errors.

        Returns:
            str or None: The file contents, or None if the read failed.
        """
        try:
            with open(file_path, 'r', encoding='utf-8') as file:
                contents = file.read()
            return contents
        except FileNotFoundError as e:
            error_message = f"Error: The file '{file_path}' was not found."
            if is_critical:
                raise FileNotFoundError(error_message) from e
            self.log_events(error_message,
                            TroubleSgltn.Severity.ERROR)
        except IOError as e:
            error_message = f"Error: There was an issue reading the file '{file_path}': {e}"
            if is_critical:
                raise IOError(error_message) from e
            self.log_events(error_message,
                            TroubleSgltn.Severity.ERROR)
        except Exception as e:
            error_message = f"An unexpected error occurred: {e}"
            if is_critical:
                raise
            self.log_events(error_message,
                            TroubleSgltn.Severity.ERROR)
        return None

    def write_string_to_file(self, data: str, file_path: Union[str, Path], is_critical: bool = False) -> bool:
        """
        Writes any string data (including JSON strings) to a file, creating the file if it doesn't exist.
        Will also write empty strings, clearing the file's content.

        Args:
            data (str): The string to write to the file.
            file_path (str): The path and name of the file to write.
            is_critical (bool): If True, raises exceptions for errors.

        Returns:
            bool: True if the write operation was successful, False otherwise.
        """
        try:
            with open(file_path, 'w', encoding='utf-8') as file:
                file.write(data)
            return True
        except (IOError, OSError) as e:
            self.log_events(f"Plush - An error occurred while writing to file: {file_path}: {e}",
                            TroubleSgltn.Severity.WARNING,
                            True)
            if is_critical:
                raise
            return False

    def update_json_data(self, upd_data: dict, cfg_data: dict):
        """
        Updates cfg_data with new key-value pairs from upd_data.

        Args:
            upd_data (dict): Data containing potential new keys and values.
            cfg_data (dict): The original configuration data.

        Note:
            The 'style' list is handled differently from the rest of the fields:
            its items are added or removed individually in alphabetical order, and
            upd_data style[] items that start with '-' are removed.

        Returns:
            dict: The updated configuration data.
        """
        for key, value in upd_data.items():
            if key != 'style':
                cfg_data[key] = value

        if 'style' in upd_data:
            for item in upd_data['style']:
                if item.startswith('-'):
                    # Remove the item (strip the "-" prefix with item[1:] before removing)
                    remove_item = item[1:]
                    if remove_item in cfg_data['style']:
                        cfg_data['style'].remove(remove_item)
                else:
                    # Add item(s) in alphabetical sort order
                    position = bisect.bisect_left(cfg_data['style'], item)
                    # Check that the position for the new item is not beyond the end of the list,
                    # and that it's not a duplicate
                    if position >= len(cfg_data['style']) or cfg_data['style'][position] != item:
                        cfg_data['style'].insert(position, item)
        return cfg_data
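
    # Worked example of the 'style' merge semantics (hypothetical data; not executed):
    #   cfg = {'key': 1, 'style': ['Anime', 'Baroque']}
    #   upd = {'key': 2, 'style': ['-Anime', 'Abstract']}
    #   json_manager().update_json_data(upd, cfg)
    #   # -> {'key': 2, 'style': ['Abstract', 'Baroque']}
    #   #    ('-Anime' removed; 'Abstract' inserted in alphabetical order via bisect)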

    def extract_from_dict(self, dict_data: dict, target: list) -> dict:
        """
        A recursive method that extracts data from a dict by finding keys that meet the criteria in the target
        argument, and returns them and their values in a new dict. Duplicate keys have their values stored in a
        list under the key. JSON strings are coerced to dictionary objects.

        Args:
            dict_data (dict): The dictionary to be searched for matching values.
            target (list): A list of search values.

        Returns:
            A dictionary containing the dicts whose keys match the criteria, and lists that either hold values
            from duplicate keys or had elements that matched the criteria.
        """
        def find_it(data, search_key, new_dict):
            for k, v in data.items():
                if k == search_key:
                    if k in new_dict:  # Key is a duplicate
                        if isinstance(new_dict[k], list):  # If the existing entry is a list
                            new_dict[k].append(v)  # Append the dupe key's value to the list
                        else:
                            new_dict[k] = [new_dict[k], v]  # Convert it to a list and append
                    else:
                        new_dict[k] = v
                elif isinstance(v, str):
                    v = v.strip()
                    if v.startswith('{') or v.startswith('['):
                        try:
                            parsed = json.loads(v)
                            # Guard: parsed JSON may be a list rather than a dict
                            if isinstance(parsed, dict):
                                find_it(parsed, search_key, new_dict)
                            elif isinstance(parsed, list):
                                for i in parsed:
                                    if isinstance(i, dict):
                                        find_it(i, search_key, new_dict)
                        except json.JSONDecodeError:
                            # Whoops, it's not a JSON string
                            self.log_events(f"Attempt to convert string to dictionary failed, some data will be missing: {v}",
                                            TroubleSgltn.Severity.WARNING,
                                            True)
                            continue
                elif isinstance(v, dict):
                    find_it(v, search_key, new_dict)
                elif isinstance(v, list):
                    for i in v:
                        if i == search_key:
                            new_dict[k] = v
                        if isinstance(i, dict):
                            find_it(i, search_key, new_dict)

        new_dict = {}
        local_source = dict_data
        if isinstance(target, list):
            for search_key in target:
                find_it(local_source, search_key, new_dict)
        else:
            self.log_events("'extract_from_dict': Incoming search terms were not a list object. Returning empty dict.",
                            TroubleSgltn.Severity.WARNING,
                            True)
        return new_dict
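
    # Usage sketch (hypothetical data; not executed): pull every 'seed' value out of a
    # nested structure; duplicate keys collapse into a list under the shared key.
    #   data = {'a': {'seed': 1}, 'b': '{"seed": 2}'}  # note the embedded JSON string
    #   json_manager().extract_from_dict(data, ['seed'])
    #   # -> {'seed': [1, 2]}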

    #**testing
    def extract_with_translation(self, dict_data: dict, translate_keys: dict, min_prompt_len: int = 1, alpha_pct: float = 0.0, filter_phrase: str = "") -> dict:
        """
        A recursive method that extracts and translates keys from a dict by finding keys that match those in the
        translate_keys argument, and returns them with their values in a new dict using the friendly names.
        Duplicate keys have their values stored in a list under the friendly name key. JSON strings are coerced
        to dictionary objects. Possible Prompts have to meet the additional criteria of a minimum length and a
        maximum percentage of numeric characters.

        Args:
            dict_data (dict): The dictionary to be searched.
            translate_keys (dict): A dictionary with original keys as keys and friendly names as values.
            min_prompt_len (int): The minimum length of a string to qualify as a Possible Prompt.
            alpha_pct (float): The minimum ratio of alpha characters (plus spaces and commas) to qualify as a Possible Prompt.
            filter_phrase (str): A string whose exact match must be present in order to qualify as a Possible Prompt.

        Returns:
            A dictionary containing the translated keys and their values, with lists for duplicate keys or matched elements.
        """
        new_dict = {}

        def custom_sort(item):
            key = item[0]
            # Assign the highest priority to 'Possible Prompts'
            if key == 'Possible Prompts':
                return (0, key)
            elif key == "Seed":
                return (1, key)
            elif key == 'Source File':
                return (3, key)  # Low priority for processing info
            elif key == 'Processing Application':
                return (3, key)
            # Normal priority for everything else
            return (2, key)

        def process_and_divide(friendly_name, value):
            # Initialize the regular expression pattern
            pattern = None
            processed_value = value  # Default to the original value

            # GPS Latitude and Longitude arrive as three rationals: degrees, minutes, seconds
            if "Latitude" in friendly_name and "Hemisphere" not in friendly_name:
                pattern = re.compile(r'^(\d+)/(\d+)\s+(\d+)/(\d+)\s+(\d+)/(\d+)$')
            elif "Longitude" in friendly_name and "Hemisphere" not in friendly_name:
                pattern = re.compile(r'^(\d+)/(\d+)\s+(\d+)/(\d+)\s+(\d+)/(\d+)$')
            else:
                # Everything else is a single rational: numerator/denominator
                pattern = re.compile(r'^(\d+)/(\d+)$')

            # If a pattern was assigned, try matching it to the value
            if pattern is not None:
                match = pattern.match(value)
                if match:
                    if "Latitude" in friendly_name or "Longitude" in friendly_name:
                        # Process as a GPS coordinate: convert deg/min/sec rationals to decimal degrees
                        degrees, degrees_div, minutes, minutes_div, seconds, seconds_div = map(int, match.groups())
                        if degrees_div != 0 and minutes_div != 0 and seconds_div != 0:
                            decimal_degrees = degrees / degrees_div + minutes / minutes_div / 60 + seconds / seconds_div / 3600
                            processed_value = f"{decimal_degrees:.6f}°"
                    elif "Altitude" in friendly_name:
                        # Process as altitude
                        meters, meters_div = map(int, match.groups())
                        if meters_div != 0:
                            feet = (meters / meters_div) * 3.28084  # Convert meters to feet
                            processed_value = f"{feet:.2f} ft"
                        else:
                            processed_value = '0'
                    elif "Shutter Speed" in friendly_name:
                        # Process as shutter speed: the rational is an APEX time value (Tv),
                        # where exposure time in seconds = 2**(-Tv)
                        numerator, denominator = map(int, match.groups())
                        if denominator != 0:
                            quotient = numerator / denominator
                            shutter_speed = 2**(-quotient)
                            if shutter_speed < 1:
                                # Convert to 1/x format for speeds faster than 1 second
                                reciprocal = round(1 / shutter_speed)
                                return f"1/{reciprocal} sec"
                            else:
                                # For 1 second or slower, simply round and add " sec"
                                return f"{round(shutter_speed, 2)} sec"
                        else:
                            processed_value = '0'  # Handling for zero denominator
                    elif "Aperture" in friendly_name:
                        # Process as aperture using the APEX standard:
                        # f-number = sqrt(2**Av), where Av is the APEX aperture value
                        numerator, denominator = map(int, match.groups())
                        if denominator != 0:
                            quotient = numerator / denominator
                            aperture_value = math.sqrt(2**quotient)
                            processed_value = f"F{aperture_value:.2f}"
                        else:
                            processed_value = '0'  # Handling for zero denominator
                    elif "Exposure Time" in friendly_name:
                        # Process as exposure time: a plain rational number of seconds
                        numerator, denominator = map(int, match.groups())
                        if denominator != 0:
                            quotient = numerator / denominator
                            return f"{str(round(quotient, 5))} sec"
                        else:
                            processed_value = '0'  # Handling for zero denominator
                    else:
                        # Any other rational: just perform the division
                        numerator, denominator = map(int, match.groups())
                        if denominator != 0:
                            quotient = numerator / denominator
                            return f"{str(round(quotient, 2))}"
                        else:
                            processed_value = '0'  # Handling for zero denominator

            # If no match was found, the original value is retained
            return processed_value

        def filter_prompt_items(items, min_prompt_len, alpha_pct, filter_phrase):
            """Recursively filter potential prompts based on length and numeric-character-ratio criteria, including in nested lists."""
            filtered_items = []
            filter_phrase_lower = filter_phrase.lower()

            def calculate_prompt_char_ratio(s: str) -> float:
                """Calculate the ratio of common prompt chars (alphabetical characters, spaces and commas) to the string's total length."""
                alpha_space_count = sum(c.isalpha() or c.isspace() or c == ',' for c in s)
                return alpha_space_count / len(s) if s else 0

            def filter_recursive(item):
                if isinstance(item, str):
                    if len(item.strip()) >= min_prompt_len and calculate_prompt_char_ratio(item) >= alpha_pct and filter_phrase_lower in item.lower():
                        return item
                elif isinstance(item, list):
                    filtered_sublist = [filter_recursive(subitem) for subitem in item]
                    filtered_sublist = [subitem for subitem in filtered_sublist if subitem is not None]  # Remove None values
                    if filtered_sublist:
                        return filtered_sublist
                return None

            for item in items:
                result = filter_recursive(item)
                if result is not None:
                    filtered_items.append(result)
            return filtered_items

        def find_and_translate(data, translate_dict, friendly_name):
            try:
                if isinstance(data, dict):
                    for k, v in data.items():
                        if k in translate_dict:  # Key matches one we're looking for
                            friendly_name = translate_dict[k]  # Get the friendly name
                            # Special handling for 'Possible Prompts' and Exif info
                            if friendly_name == 'Possible Prompts':
                                if isinstance(v, list):
                                    v = filter_prompt_items(v, min_prompt_len, alpha_pct, filter_phrase)
                                elif isinstance(v, str):
                                    # Wrap the string in a list to use the same filtering logic
                                    filtered_result = filter_prompt_items([v], min_prompt_len, alpha_pct, filter_phrase)
                                    v = filtered_result[0] if filtered_result else None  # Unwrap if not empty
                            elif "Exif" in k or "Xmp" in k:
                                # Convert rational strings (e.g. "28/10") to human-readable values
                                if isinstance(v, str) and v.strip():
                                    v = process_and_divide(friendly_name, v)
                            # Append items if the key is a duplicate
                            if friendly_name in new_dict:
                                if isinstance(new_dict[friendly_name], list):
                                    new_dict[friendly_name].extend(v if isinstance(v, list) else [v])
                                else:
                                    new_dict[friendly_name] = [new_dict[friendly_name], v] if isinstance(v, list) else [new_dict[friendly_name]] + [v]
                            else:
                                if v:
                                    new_dict[friendly_name] = v
                        elif isinstance(v, str) and v:  # Check for nested JSON strings
                            if v.startswith('{') or v.startswith('['):
                                try:
                                    parsed = json.loads(v)
                                    find_and_translate(parsed, translate_dict, friendly_name)
                                except json.JSONDecodeError:
                                    self.log_events(f"JSON conversion failed: {v}",
                                                    TroubleSgltn.Severity.WARNING,
                                                    True)
                        elif isinstance(v, dict) and v:  # Nested dict
                            find_and_translate(v, translate_dict, friendly_name)
                        elif isinstance(v, list) and v:  # List, could contain dicts
                            for i in v:
                                if isinstance(i, dict):
                                    find_and_translate(i, translate_dict, friendly_name)
                                elif isinstance(i, list):
                                    find_and_translate(i, translate_dict, friendly_name)
                        elif isinstance(v, tuple) and v:
                            processed_elements = []
                            for i in v:
                                if isinstance(i, dict):
                                    # Create a temporary dictionary to hold processed nested dictionaries
                                    temp_dict = {}
                                    find_and_translate(i, translate_dict, friendly_name)
                                    processed_elements.append(temp_dict)
                                elif k in translate_dict:
                                    # Directly append simple values within the tuple
                                    processed_elements.append(i)
                            # Append the processed elements to new_dict under the corresponding friendly name
                            if k in translate_dict:
                                friendly_name = translate_dict[k]
                                self.log_events(f"Found tuple: {friendly_name}: {processed_elements}",
                                                is_trouble=True)
                                new_dict[friendly_name] = tuple(processed_elements)
                elif isinstance(data, list):
                    for item in data:
                        find_and_translate(item, translate_dict, friendly_name)  # Recursively handle items in lists
            except Exception as e:
                self.log_events(f"An unexpected error occurred during data translation: {str(e)}",
                                TroubleSgltn.Severity.ERROR,
                                True)
        # End of find_and_translate

        if not isinstance(dict_data, dict) or not isinstance(translate_keys, dict):
            self.log_events("Improper data object passed to 'extract_with_translation', translation halted!",
                            TroubleSgltn.Severity.ERROR,
                            True)
            return new_dict  # Return an empty dict if the passed objects are invalid

        friendly_name = ""
        find_and_translate(dict_data, translate_keys, friendly_name)
        sorted_items = dict(sorted(new_dict.items(), key=custom_sort))
        return sorted_items
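
    # Usage sketch (hypothetical Exif-style data; not executed): matching keys are renamed
    # to their friendly names, APEX rationals are converted, and the result is sorted with
    # 'Possible Prompts' first.
    #   j_mngr = json_manager()
    #   translate = {'Exif.Photo.ApertureValue': 'Aperture', 'parameters': 'Possible Prompts'}
    #   meta = {'Exif.Photo.ApertureValue': '28/10', 'parameters': 'a cat in a hat'}
    #   j_mngr.extract_with_translation(meta, translate, min_prompt_len=5, alpha_pct=0.8)
    #   # -> {'Possible Prompts': 'a cat in a hat', 'Aperture': 'F2.64'}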

    def prep_formatted_file(self, parsed_dict):
        formatted_file = ""
        bullet = '➤ '  # Bullet point for headings (\u27a4)
        sub_bullet = '   • '  # Indented sub-bullet for items (\u2022)
        sub_open_bracket = '['
        sub_close_bracket = ']'
        newlines = '\n'

        def process_item(key, item):
            """Flatten and format items from lists, tuples, or simple values for inclusion in formatted_file."""
            if isinstance(item, (str, int, float)) and str(item).strip():
                if 'Possible Prompts' not in key:
                    return f" {sub_open_bracket}{item}{sub_close_bracket}"
                else:
                    return f"{newlines}{sub_bullet}{item}{newlines}"
            elif isinstance(item, (list, tuple)):
                # Flatten nested lists/tuples and format their items
                return ''.join(process_item(key, subitem) for subitem in item)
            else:
                return ''  # Return an empty string for unsupported types or to skip processing

        for key, value in parsed_dict.items():
            formatted_file += f"{newlines}{newlines}{bullet} {key}:"