import os
import sys
import sqlite3
import csv
import re
import pandas as pd
from openpyxl import Workbook

def run_sqlite_script(db, sql_script):
    """Execute an SQL script file against an SQLite database; return True on success."""
    try:
        conn = sqlite3.connect(db)
        cursor = conn.cursor()
        with open(sql_script, 'r') as script_file:
            script_content = script_file.read()
        cursor.executescript(script_content)
        conn.commit()
        return True
    except Exception as e:
        logmsg(error(), "run_sqlite_script", f"Failed to run the script on '{db}': {e}")
        return False
    finally:
        # conn exists only if sqlite3.connect() succeeded.
        if 'conn' in locals():
            conn.close()
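# Example (hypothetical file names):
#   run_sqlite_script("model.sqlite", "load_parameters.sql")
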
def write_to_dat_file(table_name, columns, data, output_file):
    """Write table rows to a whitespace-delimited .dat file with a comment header."""
    try:
        with open(output_file, 'w') as outfile:
            outfile.write(f"# Data for table: {table_name}\n")
            outfile.write(f"# Columns: {', '.join(columns)}\n")
            for row in data:
                formatted_row = " ".join(map(str, row))
                outfile.write(f"{formatted_row}\n")
        logmsg(info(), "write_to_dat_file", f"Data written to '{output_file}'")
    except Exception as e:
        logmsg(error(), "write_to_dat_file", f"Failed to write data to '{output_file}': {e}")

def export_to_excel(input_db, output_excel):
    """Export all tables from an SQLite database to an Excel file, one sheet per table."""
    try:
        # Check if the database file exists
        if not os.path.exists(input_db):
            logmsg(error(), "Database Error", f"Input database '{input_db}' not found.")
            return False

        conn = sqlite3.connect(input_db)
        logmsg(info(), "Database", f"Connected to database '{input_db}'.")

        with pd.ExcelWriter(output_excel, engine='openpyxl') as writer:
            cursor = conn.cursor()
            cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
            tables = [row[0] for row in cursor.fetchall()]

            for table_name in tables:
                logmsg(info(), "Database", f"Exporting table '{table_name}' to Excel sheet.")
                table_data = pd.read_sql_query(f"SELECT * FROM {table_name}", conn)
                table_data.to_excel(writer, sheet_name=table_name, index=False)

        logmsg(info(), "Excel Export", f"Successfully exported database '{input_db}' to Excel file '{output_excel}'.")
        return True

    except Exception as e:
        logmsg(error(), "Excel Export Error", f"Failed to export '{input_db}' to '{output_excel}': {e}")
        return False
    finally:
        if 'conn' in locals():
            conn.close()
            logmsg(info(), "Database", "Database connection closed.")
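# Example (hypothetical paths): export every table of a model database to one workbook:
#   export_to_excel("model.sqlite", "model.xlsx")
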
# Severity prefixes used by logmsg().
def warning():
    return " warning => "

def change():
    return "DIFFERS ===> "

def error():
    return "ERROR =====> "

def info():
    return " "

def diagnostic():
    return "diagnostic "

def logmsg(prefix, *args):
    """Print a message with a severity prefix.

    The last positional argument is the message; any arguments before it are
    joined into the prefix as labels, each suffixed with ':' (trailing spaces
    in a label are moved after the colon).  Multi-line messages are printed
    one prefixed line at a time.
    """
    message = args[-1]
    prefixes = [prefix]
    for part in args[:-1]:
        if part == '':
            continue
        # Insert ':' before any trailing spaces, then append a separating space.
        part = re.sub(r'( *)$', r':\1 ', part)
        prefixes.append(part)
    full_prefix = ''.join(prefixes)
    for line in message.split('\n'):
        print(f"{full_prefix}{line}")
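# Example: logmsg(error(), "run_sqlite_script", "boom") prints
#   "ERROR =====> run_sqlite_script: boom"
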
def apply_transform(value, round_prec):
    """Apply a deterministic multiplicative perturbation, then optional rounding."""
    transformed_value = value * 1.12345678987654321
    if round_prec > 0:
        # Two-step rounding: 15 significant digits first to shed
        # floating-point noise, then the requested precision.
        transformed_value = float(f"{transformed_value:.15g}")
        transformed_value = float(f"{transformed_value:.{round_prec}g}")
    return transformed_value
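# Example: apply_transform(1.0, 4) returns 1.123
# (1.0 * 1.12345678987654321 rounded to 4 significant digits).
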
def ompp_tables_to_csv(db, dir, round_prec=0, zero_fuzz=1e-15, do_original=0, do_transformed=0):
    """Export every output table of an ompp model database to CSV files.

    Values within zero_fuzz of zero are written as 0.0.  If round_prec > 0,
    values are rounded to round_prec significant digits.  Optionally also
    writes the untouched values (do_original) and deterministically perturbed
    values (do_transformed) to parallel subdirectories.
    Returns 0 on success, 1 on error.
    """
    rounding_on = round_prec > 0

    dir = dir.replace('\\', '/')

    dir_original = f"{dir}/original"
    dir_transformed = f"{dir}/transformed"

    outdirs = [dir]
    if do_original:
        outdirs.append(dir_original)
    if do_transformed:
        outdirs.append(dir_transformed)

    for fldr in outdirs:
        if not os.path.isdir(fldr):
            try:
                os.makedirs(fldr)
            except Exception as e:
                logmsg(error(), f"unable to create directory {fldr}: {e}")
                return 1

    try:
        conn = sqlite3.connect(db)
        cursor = conn.cursor()
    except sqlite3.Error as e:
        logmsg(error(), f"Cannot connect to database {db}: {e}")
        return 1

    try:
        cursor.execute("Select table_name, table_rank From table_dic Order By table_name;")
        tables_data = cursor.fetchall()
    except sqlite3.Error as e:
        logmsg(error(), f"Failed to retrieve table list: {e}")
        conn.close()
        return 1

    tables = []
    ranks = {}
    for table_name, table_rank in tables_data:
        tables.append(table_name)
        ranks[table_name] = table_rank

    for table in tables:
        rank = ranks[table]
        # Order by the Dim0..DimN key columns so the output row order is deterministic.
        order_clause = "Order By " + ','.join(f"Dim{dim}" for dim in range(rank + 1))
        select_query = f"Select * From {table} {order_clause};"

        try:
            cursor.execute(select_query)
            rows = cursor.fetchall()
            columns = [description[0] for description in cursor.description]
        except sqlite3.Error as e:
            logmsg(error(), f"Failed to retrieve data from table {table}: {e}")
            conn.close()
            return 1

        if len(rows) == 0:
            continue

        out_csv = f"{dir}/{table}.csv"
        out_csv_original = f"{dir_original}/{table}.csv" if do_original else None
        out_csv_transformed = f"{dir_transformed}/{table}.csv" if do_transformed else None

        try:
            with open(out_csv, 'w', newline='') as outfile:
                writer = csv.writer(outfile)
                writer.writerow(columns)

                if do_original:
                    out_original = open(out_csv_original, 'w', newline='')
                    writer_original = csv.writer(out_original)
                    writer_original.writerow(columns)
                if do_transformed:
                    out_transformed = open(out_csv_transformed, 'w', newline='')
                    writer_transformed = csv.writer(out_transformed)
                    writer_transformed.writerow(columns)

                for row in rows:
                    row = list(row)
                    if len(row) == 0:
                        continue
                    if row[-1] is not None and row[-1] != '':
                        try:
                            original_value = float(row[-1])
                        except (TypeError, ValueError):
                            # Non-numeric measure value: pass the row through unchanged.
                            writer.writerow(row)
                            if do_original:
                                writer_original.writerow(row)
                            if do_transformed:
                                writer_transformed.writerow(row)
                            continue

                        # Suppress values that are zero to within zero_fuzz.
                        if abs(original_value) <= zero_fuzz:
                            value = 0.0
                        else:
                            value = original_value
                        if rounding_on:
                            # Two-step rounding: 15 significant digits first to shed
                            # floating-point noise, then the requested precision.
                            value = float(f"{value:.15g}")
                            value = float(f"{value:.{round_prec}g}")
                        if do_transformed:
                            transformed_value = apply_transform(value, round_prec)
                        else:
                            transformed_value = value

                        # Normalize 3-digit exponents (e.g. e-005 -> e-05) so the
                        # output is comparable across platforms.
                        value_str = re.sub(r'e([-+])0(\d\d)', r'e\1\2', f"{value:.15g}")
                        writer.writerow(row[:-1] + [value_str])
                        if do_original:
                            original_value_str = re.sub(r'e([-+])0(\d\d)', r'e\1\2', f"{original_value:.15g}")
                            writer_original.writerow(row[:-1] + [original_value_str])
                        if do_transformed:
                            transformed_value_str = re.sub(r'e([-+])0(\d\d)', r'e\1\2', f"{transformed_value:.15g}")
                            writer_transformed.writerow(row[:-1] + [transformed_value_str])
                    else:
                        # Null or empty measure value: write the row as-is.
                        writer.writerow(row)
                        if do_original:
                            writer_original.writerow(row)
                        if do_transformed:
                            writer_transformed.writerow(row)

            if do_original:
                out_original.close()
            if do_transformed:
                out_transformed.close()

        except Exception as e:
            logmsg(error(), f"Error processing table {table}: {e}")
            conn.close()
            return 1

    conn.close()
    return 0

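# Example (hypothetical paths): export with 6-digit rounding plus the
# original and transformed variants:
#   ompp_tables_to_csv("model.sqlite", "out/csv", round_prec=6, do_original=1, do_transformed=1)
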
def read_parameters_from_csv(csv_file):
    """Read parameters from a CSV file with 'name', 'type', and 'value' columns.

    If an 'index' column is present, parameters are treated as arrays and each
    row supplies one element; otherwise each row is a scalar.  Returns a dict
    keyed by parameter name, as consumed by write_parameters_to_dat.
    """
    parameters = {}
    with open(csv_file, 'r', newline='') as infile:
        reader = csv.DictReader(infile)
        fieldnames = reader.fieldnames

        required_fields = {'name', 'type', 'value'}
        if not fieldnames or not required_fields.issubset(set(fieldnames)):
            raise ValueError("CSV file must contain 'name', 'type', and 'value' columns")

        is_array = 'index' in fieldnames

        for row in reader:
            name = row['name']
            param_type = row['type']
            value = row['value']
            index = row.get('index')

            if name not in parameters:
                parameters[name] = {'type': param_type, 'values': [], 'indices': [] if is_array else None}

            if is_array:
                if index is None:
                    raise ValueError(f"Missing 'index' for parameter '{name}'")
                parameters[name]['indices'].append(int(index))
                parameters[name]['values'].append(value)
            else:
                parameters[name]['values'] = value

    return parameters

def write_parameters_to_dat(parameters, dat_file):
    """Write parameters to a DAT file.

    parameters is a dictionary as returned by read_parameters_from_csv.
    """
    with open(dat_file, 'w') as outfile:
        for name, param in parameters.items():
            param_type = param['type']
            values = param['values']
            indices = param['indices']

            if indices is None:
                # Scalar parameter.
                outfile.write(f"{param_type} {name} = {values};\n")
            else:
                # Array parameter: place each value at its index; gaps in the
                # index sequence are written as None.
                max_index = max(indices)
                array_size = max_index + 1
                sorted_values = [None] * array_size
                for idx, val in zip(indices, values):
                    sorted_values[idx] = val
                outfile.write(f"{param_type} {name}[{array_size}] = {{\n")
                for i, val in enumerate(sorted_values):
                    outfile.write(f" {val}")
                    if i < array_size - 1:
                        outfile.write(",\n")
                    else:
                        outfile.write("\n")
                outfile.write("};\n")
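
# A minimal end-to-end sketch (hypothetical file names throughout): run a
# setup script, export tables to CSV and Excel, and round-trip parameters
# from CSV to DAT.
if __name__ == "__main__":
    db = "model.sqlite"  # assumption: an ompp model database
    if run_sqlite_script(db, "setup.sql"):
        ompp_tables_to_csv(db, "out/csv", round_prec=6)
        export_to_excel(db, "out/model.xlsx")
    params = read_parameters_from_csv("parameters.csv")
    write_parameters_to_dat(params, "parameters.dat")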