From d98e7f2da427940cb1d3e53e81ad427bf143ca3d Mon Sep 17 00:00:00 2001
From: Kyle Goodrick
Date: Fri, 27 May 2022 08:54:08 -0600
Subject: [PATCH 1/5] Add parsing for Fourier analysis. Add pandas requirement
 for storing result. Use context manager for reading file.

---
 PyLTSpice/LTSteps.py | 254 ++++++++++++++++++++++++++++---------------
 doc/requirements.txt |   1 +
 2 files changed, 165 insertions(+), 90 deletions(-)

diff --git a/PyLTSpice/LTSteps.py b/PyLTSpice/LTSteps.py
index 5873b75..49ba4ea 100644
--- a/PyLTSpice/LTSteps.py
+++ b/PyLTSpice/LTSteps.py
@@ -83,6 +83,7 @@
 import re
 import os
 import sys
+import pandas as pd
 from collections import OrderedDict
 from typing import Union, Iterable, List
 from PyLTSpice.detect_encoding import detect_encoding
@@ -320,7 +321,6 @@ class LTSpiceLogReader(object):
     def __init__(self, log_filename: str, read_measures=True, step_set={}):
         self.logname = log_filename
         self.encoding = detect_encoding(log_filename, "Circuit:")
-        fin = open(log_filename, 'r', encoding=self.encoding)
         self.step_count = len(step_set)
         self.stepset = step_set.copy()  # A copy is done since the dictionary is a mutable object.
         # Changes in step_set would be propagated to object on the call
@@ -333,105 +333,179 @@ def __init__(self, log_filename: str, read_measures=True, step_set={}):
                           re.IGNORECASE)

         message("Processing LOG file", log_filename)
-        line = fin.readline()

-        while line:
-            if line.startswith(".step"):
-                # message(line)
-                self.step_count += 1
-                tokens = line.strip('\r\n').split(' ')
-                for tok in tokens[1:]:
-                    lhs, rhs = tok.split("=")
-                    # Try to convert to int or float
-                    rhs = try_convert_value(rhs)
-
-                    ll = self.stepset.get(lhs, None)
-                    if ll:
-                        ll.append(rhs)
+        with open(log_filename, 'r', encoding=self.encoding) as fin:
+        line = fin.readline()
+
+            while line:
+                if line.startswith("N-Period"):
+                    # Read number of periods
+                    n_periods = int(line.strip('\r\n').split("=")[-1])
+                    # Read waveform name
+                    line = enc_norm(fin.readline()).strip('\r\n')
+                    waveform = line.split(" of ")[-1]
+                    # Read DC component
+                    line = enc_norm(fin.readline()).strip('\r\n')
+                    dc_component = float(line.split(':')[-1])
+                    # Skip blank line
+                    fin.readline()
+                    # Skip two header lines
+                    fin.readline()
+                    fin.readline()
+
+                    harmonic_lines = []
+                    while True:
+                        line = enc_norm(fin.readline()).strip('\r\n')
+                        if line.startswith("Total Harmonic"):
+                            # Find THD
+                            thd = float(re.search(r"\d+.\d+", line).group())
+                            break
+                        else:
+                            harmonic_lines.append(line.replace("°",""))
+
+                    # Create Table
+                    columns = [
+                        'Harmonic Number',
+                        'Frequency [Hz]',
+                        'Fourier Component',
+                        'Normalized Component',
+                        'Phase [degree]',
+                        'Normalized Phase [degree]'
+                    ]
+                    harmonics_df = pd.DataFrame([r.split('\t') for r in harmonic_lines], columns=columns)
+                    # Convert to numeric
+                    harmonics_df = harmonics_df.apply(pd.to_numeric, errors='ignore')
+
+                    # Find Fundamental Frequency
+                    frequency = harmonics_df['Frequency [Hz]'][0]
+
+                    # Save data related to this fourier analysis in a dictionary
+                    data_dict = {
+                        'dc': dc_component,
+                        'thd': thd,
+                        'harmonics': harmonics_df
+                    }
+
+                    # Find the dictionary that stores fourier data or create it if it does not exist
+                    fourier_dict: dict = self.dataset.get('fourier', None)
+                    if fourier_dict is None:
+                        self.dataset['fourier'] = {}
+                        fourier_dict = self.dataset['fourier']
+
+                    # Find the dict that stores data for this frequency or create it if it does not exist
+                    frequency_dict: dict = fourier_dict.get(frequency, None)
+                    if frequency_dict is None:
+                        fourier_dict[frequency] = {}
+                        frequency_dict = fourier_dict[frequency]
+
+                    # Find the dict that stores data for this number of periods or create it if it does not exist
+                    period_dict: dict = frequency_dict.get(n_periods, None)
+                    if period_dict is None:
+                        frequency_dict[n_periods] = {}
+                        period_dict = frequency_dict[n_periods]
+
+                    # Find the list that stores data for this waveform or create it if it does not exist
+                    waveform_list: list = period_dict.get(waveform, None)
+                    if waveform_list is None:
+                        period_dict[waveform] = []
+                        waveform_list = period_dict[waveform]
+
+                    # Add the data to the list
+                    waveform_list.append(data_dict)
+
+                if line.startswith(".step"):
+                    # message(line)
+                    self.step_count += 1
+                    tokens = line.strip('\r\n').split(' ')
+                    for tok in tokens[1:]:
+                        lhs, rhs = tok.split("=")
+                        # Try to convert to int or float
+                        rhs = try_convert_value(rhs)
+
+                        ll = self.stepset.get(lhs, None)
+                        if ll:
+                            ll.append(rhs)
+                        else:
+                            self.stepset[lhs] = [rhs]
+
+                elif line.startswith("Measurement:"):
+                    if not read_measures:
+                        fin.close()
+                        return
                     else:
-                        self.stepset[lhs] = [rhs]
+                        break  # Jumps to the section that reads measurements

-            elif line.startswith("Measurement:"):
-                if not read_measures:
-                    fin.close()
-                    return
-                else:
-                    break  # Jumps to the section that reads measurements
-
-            if self.step_count == 0:  # then there are no steps,
-                # there are only measures taken in the format parameter: measurement
-                # A few examples of readings
-                # vout_rms: RMS(v(out))=1.41109 FROM 0 TO 0.001 => Interval
-                # vin_rms: RMS(v(in))=0.70622 FROM 0 TO 0.001 => Interval
-                # gain: vout_rms/vin_rms=1.99809 => Parameter
-                # vout1m: v(out)=-0.0186257 at 0.001 => Point
-                match = regx.match(line)
-                if match:
-                    # Get the data
-                    dataname = match.group('name')
-                    if match.group('from'):
-                        headers = [dataname, dataname + "_FROM", dataname + "_TO"]
-                        measurements = [match.group('value'), match.group('from'), match.group('to')]
-                    elif match.group('at'):
-                        headers = [dataname, dataname + "_at"]
-                        measurements = [match.group('value'), match.group('at')]
-                    else:
-                        headers = [dataname]
-                        measurements = [match.group('value')]
+
+                if self.step_count == 0:  # then there are no steps,
+                    # there are only measures taken in the format parameter: measurement
+                    # A few examples of readings
+                    # vout_rms: RMS(v(out))=1.41109 FROM 0 TO 0.001 => Interval
+                    # vin_rms: RMS(v(in))=0.70622 FROM 0 TO 0.001 => Interval
+                    # gain: vout_rms/vin_rms=1.99809 => Parameter
+                    # vout1m: v(out)=-0.0186257 at 0.001 => Point
+                    match = regx.match(line)
+                    if match:
+                        # Get the data
+                        dataname = match.group('name')
+                        if match.group('from'):
+                            headers = [dataname, dataname + "_FROM", dataname + "_TO"]
+                            measurements = [match.group('value'), match.group('from'), match.group('to')]
+                        elif match.group('at'):
+                            headers = [dataname, dataname + "_at"]
+                            measurements = [match.group('value'), match.group('at')]
+                        else:
+                            headers = [dataname]
+                            measurements = [match.group('value')]

-                for k, title in enumerate(headers):
-                    self.dataset[title] = [
-                        try_convert_value(measurements[k])]  # need to be a list for compatibility
+                        for k, title in enumerate(headers):
+                            self.dataset[title] = [
+                                try_convert_value(measurements[k])]  # need to be a list for compatibility
             line = fin.readline()

-        # message("Reading Measurements")
-        dataname = None
-
-        headers = []  # Initializing an empty parameters
-        measurements = []
-        while line:
-            line = line.strip('\r\n')
-            if line.startswith("Measurement: "):
-                if dataname:  # If previous measurement was saved
-                    # store the info
-                    if len(measurements):
-                        message("Storing Measurement %s (count %d)" % (dataname, len(measurements)))
-                        for k, title in enumerate(headers):
-                            self.dataset[title] = [line[k] for line in measurements]
-                    headers = []
-                    measurements = []
-                dataname = line[13:]  # text which is after "Measurement: ". len("Measurement: ") -> 13
-                message("Reading Measurement %s" % line[13:])
-            else:
-                tokens = line.split("\t")
-                if len(tokens) >= 2:
-                    try:
-                        int(tokens[0])  # This instruction only serves to trigger the exception
-                        meas = tokens[1:]  # [float(x) for x in tokens[1:]]
-                        measurements.append(try_convert_values(meas))
-                        self.measure_count += 1
-                    except ValueError:
-                        if len(tokens) >= 3 and (tokens[2] == "FROM" or tokens[2] == 'at'):
-                            tokens[2] = dataname + '_' + tokens[2]
-                        if len(tokens) >= 4 and tokens[3] == "TO":
-                            tokens[3] = dataname + "_TO"
-                        headers = [dataname] + tokens[2:]
-                        measurements = []
-                else:
-                    message("->", line)
+            # message("Reading Measurements")
+            dataname = None
+
+            headers = []  # Initializing an empty parameters
+            measurements = []
+            while line:
+                line = line.strip('\r\n')
+                if line.startswith("Measurement: "):
+                    if dataname:  # If previous measurement was saved
+                        # store the info
+                        if len(measurements):
+                            message("Storing Measurement %s (count %d)" % (dataname, len(measurements)))
+                            for k, title in enumerate(headers):
+                                self.dataset[title] = [line[k] for line in measurements]
+                        headers = []
+                        measurements = []
+                    dataname = line[13:]  # text which is after "Measurement: ". len("Measurement: ") -> 13
+                    message("Reading Measurement %s" % line[13:])
+                else:
+                    tokens = line.split("\t")
+                    if len(tokens) >= 2:
+                        try:
+                            int(tokens[0])  # This instruction only serves to trigger the exception
+                            meas = tokens[1:]  # [float(x) for x in tokens[1:]]
+                            measurements.append(try_convert_values(meas))
+                            self.measure_count += 1
+                        except ValueError:
+                            if len(tokens) >= 3 and (tokens[2] == "FROM" or tokens[2] == 'at'):
+                                tokens[2] = dataname + '_' + tokens[2]
+                            if len(tokens) >= 4 and tokens[3] == "TO":
+                                tokens[3] = dataname + "_TO"
+                            headers = [dataname] + tokens[2:]
+                            measurements = []
+                    else:
+                        message("->", line)

             line = fin.readline()  # advance to the next line

-        # storing the last data into the dataset
-        message("Storing Measurement %s" % dataname)
-        if len(measurements):
-            for k, title in enumerate(headers):
-                self.dataset[title] = [line[k] for line in measurements]
+            # storing the last data into the dataset
+            message("Storing Measurement %s" % dataname)
+            if len(measurements):
+                for k, title in enumerate(headers):
+                    self.dataset[title] = [line[k] for line in measurements]

-        message("%d measurements" % len(self.dataset))
-        message("Identified %d steps, read %d measurements" % (self.step_count, self.measure_count))
-
-        fin.close()
+            message("%d measurements" % len(self.dataset))
+            message("Identified %d steps, read %d measurements" % (self.step_count, self.measure_count))

     def __getitem__(self, key):
         """
diff --git a/doc/requirements.txt b/doc/requirements.txt
index 4dc1ee7..cbcc8fb 100644
--- a/doc/requirements.txt
+++ b/doc/requirements.txt
@@ -1,6 +1,7 @@
 pip
 Sphinx
 numpy>=1.14.0
+pandas
 matplotlib
 insipid-sphinx-theme
 autodocsumm
\ No newline at end of file

From a499d5f026a50f5eb7a53452da9df49a053ec4fb Mon Sep 17 00:00:00 2001
From: Kyle Goodrick
Date: Wed, 6 Jul 2022 12:27:28 -0600
Subject: [PATCH 2/5] Ignore egg-info in git

---
 .gitignore | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/.gitignore b/.gitignore
index 40bb4e3..a7d7450 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,3 @@
 .vscode/
-__pycache__/
\ No newline at end of file
+__pycache__/
+PyLTSpice.egg-info
\ No newline at end of file

From 9b0ff97b4bc3da689c5d43ba7eba3d395abe0f78 Mon Sep 17 00:00:00 2001
From: Kyle Goodrick
Date: Wed, 6 Jul 2022 15:22:10 -0600
Subject: [PATCH 3/5] Remove enc_norm and fix tab

---
 PyLTSpice/LTSteps.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/PyLTSpice/LTSteps.py b/PyLTSpice/LTSteps.py
index 49ba4ea..6a4e679 100644
--- a/PyLTSpice/LTSteps.py
+++ b/PyLTSpice/LTSteps.py
@@ -334,17 +334,17 @@ def __init__(self, log_filename: str, read_measures=True, step_set={}):
         message("Processing LOG file", log_filename)

         with open(log_filename, 'r', encoding=self.encoding) as fin:
-        line = fin.readline()
+            line = fin.readline()

             while line:
                 if line.startswith("N-Period"):
                     # Read number of periods
                     n_periods = int(line.strip('\r\n').split("=")[-1])
                     # Read waveform name
-                    line = enc_norm(fin.readline()).strip('\r\n')
+                    line = fin.readline().strip('\r\n')
                     waveform = line.split(" of ")[-1]
                     # Read DC component
-                    line = enc_norm(fin.readline()).strip('\r\n')
+                    line = fin.readline().strip('\r\n')
                     dc_component = float(line.split(':')[-1])
                     # Skip blank line
                     fin.readline()
@@ -354,7 +354,7 @@ def __init__(self, log_filename: str, read_measures=True, step_set={}):

                     harmonic_lines = []
                     while True:
-                        line = enc_norm(fin.readline()).strip('\r\n')
+                        line = fin.readline().strip('\r\n')
                         if line.startswith("Total Harmonic"):
                             # Find THD
                             thd = float(re.search(r"\d+.\d+", line).group())

From 49b200ddf672f86f378213c806848e9356393f14 Mon Sep 17 00:00:00 2001
From: Kyle Goodrick
Date: Wed, 6 Jul 2022 16:24:41 -0600
Subject: [PATCH 4/5] Fix encoding check and indent

---
 PyLTSpice/LTSteps.py         |  4 ++--
 PyLTSpice/detect_encoding.py | 29 +++++++++++++++++------------
 2 files changed, 19 insertions(+), 14 deletions(-)

diff --git a/PyLTSpice/LTSteps.py b/PyLTSpice/LTSteps.py
index 6a4e679..5a803e6 100644
--- a/PyLTSpice/LTSteps.py
+++ b/PyLTSpice/LTSteps.py
@@ -458,7 +458,7 @@ def __init__(self, log_filename: str, read_measures=True, step_set={}):
                         for k, title in enumerate(headers):
                             self.dataset[title] = [
                                 try_convert_value(measurements[k])]  # need to be a list for compatibility
-            line = fin.readline()
+                line = fin.readline()

             # message("Reading Measurements")
             dataname = None
@@ -496,7 +496,7 @@ def __init__(self, log_filename: str, read_measures=True, step_set={}):
                     else:
                         message("->", line)

-            line = fin.readline()  # advance to the next line
+                line = fin.readline()  # advance to the next line

             # storing the last data into the dataset
             message("Storing Measurement %s" % dataname)
diff --git a/PyLTSpice/detect_encoding.py b/PyLTSpice/detect_encoding.py
index fa96fd6..2a90412 100644
--- a/PyLTSpice/detect_encoding.py
+++ b/PyLTSpice/detect_encoding.py
@@ -30,16 +30,21 @@ def detect_encoding(file_path, expected_str: str = '') -> str:
     :return: detected encoding
     :rtype: str
     """
-    if expected_str:  # if expected string is not empty
-        f = open(file_path, 'rb')  # Open the file as a binary file
-        tmp = f.read(2 * len(expected_str))  # Read the beginning of the contents of the file
-        f.close()
-        for encoding in ('utf-8', 'utf_16_le'):  # Add other possible encodings
-            if tmp.decode(encoding).startswith(expected_str):
-                return encoding
-        raise UnicodeError("Unable to detect log file encoding")
+    for encoding in ('cp1252', 'cp1250', 'utf-8', 'utf_16_le'):
+        try:
+            with open(file_path, 'r', encoding=encoding) as f:
+                lines = f.readlines()
+                f.seek(0)
+        except UnicodeDecodeError:
+            # This encoding didn't work, let's try again
+            continue
+        else:
+            if expected_str:
+                if not lines[0].startswith(expected_str):
+                    # File did not start with expected string
+                    # Try again with a different encoding (This is unlikely to resolve the issue)
+                    continue
+
+            return encoding
     else:
-        f = open(file_path, 'rb')  # Open the file as a binary file
-        tmp = f.read(2)  # Read the beginning of the contents of the file
-        f.close()
-        return 'utf-8' if tmp[1] != 0 else 'utf_16_le'
+        raise UnicodeError("Unable to detect log file encoding")

From fcb746ca075b5978502658ad9a83d8cdb32975fa Mon Sep 17 00:00:00 2001
From: Kyle Goodrick
Date: Tue, 26 Jul 2022 11:33:09 -0400
Subject: [PATCH 5/5] Fix complex parser so it can handle linear values

---
 PyLTSpice/LTSteps.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/PyLTSpice/LTSteps.py b/PyLTSpice/LTSteps.py
index 5a803e6..5709687 100644
--- a/PyLTSpice/LTSteps.py
+++ b/PyLTSpice/LTSteps.py
@@ -101,7 +101,7 @@ class LTComplex(object):
     """
     Class to represent complex numbers as exported by LTSpice
     """
-    complex_match = re.compile(r"\((?P.*)dB,(?P.*)°\)")
+    complex_match = re.compile(r"\((?P[^dB]*)(dB)?,(?P.*)°\)")

     def __init__(self, strvalue):
         a = self.complex_match.match(strvalue)
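
Note: a minimal usage sketch (not part of the patches above) of the Fourier parsing added in
PATCH 1/5. The file name "circuit.log" is a placeholder for any LTSpice log produced by a
simulation containing a .four directive; the access pattern follows the nesting the patch
builds under dataset['fourier'] (frequency -> number of periods -> waveform -> list of
results), where each result holds the DC component, the THD and a pandas DataFrame with the
harmonic table.

    from PyLTSpice.LTSteps import LTSpiceLogReader

    # "circuit.log" is a placeholder path; point it at a .log file with Fourier (.four) output.
    log = LTSpiceLogReader("circuit.log")

    # Walk the nested dictionaries the parser fills in while reading the log.
    fourier = log.dataset.get('fourier', {})
    for frequency, by_periods in fourier.items():
        for n_periods, by_waveform in by_periods.items():
            for waveform, results in by_waveform.items():
                for result in results:  # one entry per Fourier analysis found in the log
                    print(waveform, frequency, n_periods, result['dc'], result['thd'])
                    print(result['harmonics'])  # DataFrame: harmonic number, frequency, components, phases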