http://sites.ieee.org/pes-testfeeders/resources/
The current IEEE test cases are focused on North American style systems; however it is common outside of North America to see low-voltage distribution systems, both radial and meshed. It is important to make sure that tools support both dominant styles of distribution system configuration. This test case seeks to fill a benchmark gap by presenting a number of common low-voltage configurations. This circuit also introduces quasi-static time series simulations.
The IEEE European LV network is a generic 0.416 kV network served by one 0.8 MVA MV/LV transformer and an 11 kV external grid. The network supplies 906 LV buses and 55 single-phase (1-PH) loads.
In this example the time series data from benchmark is used to perform a time series load flow analysis in pandapower
The network data from csv is used to create Powerfactory and pandapower networks.
The sum total of power and the maximum voltage are plotted with respect to 1440 time steps (1 min interval) during the day:
import os
import numpy as np
import pandas as pd
import tempfile
import pandapower as pp
from pandapower.control import ConstControl
from pandapower.timeseries import DFData
from pandapower.timeseries import OutputWriter
from pandapower.timeseries.run_time_series import run_timeseries
from pandapower.networks import ieee_european_lv_asymmetric
from pandapower import pp_dir
# Locate the IEEE European LV benchmark CSV data shipped with pandapower's test files.
parent = os.path.join(pp_dir,'test','test_files')
path = os.path.join(parent, "European_LV_CSV")
load_path = os.path.join(path, "Load_Profiles")
def remove_comments(f, max_comment_lines=5):
    """Skip the leading '#' comment lines of an open text file.

    Rewinds *f*, reads at most ``max_comment_lines`` lines, and leaves the
    file position at the start of the first line that does not begin with
    ``'#'``, so a subsequent reader (e.g. ``pd.read_csv``) starts at the
    real header row.

    Parameters
    ----------
    f : file-like object
        Must support ``seek``, ``tell`` and ``readline``.
    max_comment_lines : int, default 5
        Maximum number of leading comment lines to skip (the original
        hard-coded limit, matching the IEEE European LV CSV layout).

    Returns
    -------
    f : the same file object, positioned after the comment block.
    """
    f.seek(0)
    start = 0
    for _ in range(max_comment_lines):
        start = f.tell()  # remember the start of the line about to be read
        if not f.readline().startswith('#'):
            break
    # rewind to the beginning of the first non-comment line
    f.seek(start)
    return f
# Read Loads.csv (load name, bus, phase, PF, profile name), skipping the
# '#' comment header the benchmark files carry.  The explicit f.close()
# inside the with-block was redundant: the context manager already closes.
load_csv = os.path.join(path, "Loads.csv")
with open(load_csv, 'r') as f:
    loads = pd.read_csv(remove_comments(f))
# Read LoadShapes.csv (profile name -> profile CSV file mapping), with the
# same comment skipping.  The redundant f.close() inside the with-block is
# removed: the context manager already closes the file.
load_shapes = os.path.join(path, "LoadShapes.csv")
with open(load_shapes, 'r') as f:
    loadshapes = pd.read_csv(remove_comments(f))
import pandapower.plotting as plot
net =ieee_european_lv_asymmetric('on_peak_566')
%matplotlib inline
import numpy as np
try:
import seaborn
colors = seaborn.color_palette()
except:
colors = ["b", "g", "r", "c", "y"]
bc = plot.create_bus_collection(net, net.bus.index, size=.2, color=colors[0], zorder=10)
tlc, tpc = plot.create_trafo_collection(net, net.trafo.index, color="c",size =1.5 )
lcd = plot.create_line_collection(net, net.line.index, color="grey", linewidths=0.5, use_bus_geodata=True)
sc = plot.create_bus_collection(net, net.ext_grid.bus.values, patch_type="rect", size=.75, color="c", zorder=11)
ldA = plot.create_bus_collection(net, net.asymmetric_load.bus.values[np.where(net.asymmetric_load.p_a_mw >0)], patch_type="poly3", size=.75, color="r", zorder=11)
ldB = plot.create_bus_collection(net, net.asymmetric_load.bus.values[np.where(net.asymmetric_load.p_b_mw >0)], patch_type="rect", size=.75, color="y", zorder=11)
ldC = plot.create_bus_collection(net, net.asymmetric_load.bus.values[np.where(net.asymmetric_load.p_c_mw >0)], patch_type="circle", size=.75, color="g", zorder=11)
plot.draw_collections([lcd, bc, tlc, tpc, sc,ldA,ldB,ldC], figsize=(10,7))
hp.pandapower.plotting.plotting_toolbox - INFO: Interpreting color (0.12156862745098039, 0.4666666666666667, 0.7058823529411765) as rgb or rgba! hp.pandapower.plotting.plotting_toolbox - WARNING: The number of given colors (1) is smaller than the number of nodes (907) to draw! The colors will be repeated to fit.
<AxesSubplot: >
Function to load the time series data and perform a time-series three-phase load flow
def timeseries_example(output_dir):
    """Run a 672-step three-phase time series simulation on the IEEE European LV grid.

    Results for the variables registered in ``create_output_writer`` are
    written as .json files below *output_dir*.

    Parameters
    ----------
    output_dir : str
        Existing directory the OutputWriter stores its result files in.
    """
    # 1. create the test net and check that a 3-phase power flow converges
    net = ieee_european_lv_asymmetric('on_peak_566')
    pp.runpp_3ph(net)
    # 2. create data source for loads
    profiles, ds = create_data_source()
    # 3. create controllers (to control P values of the load and the sgen)
    net = create_controllers(net, ds)
    # time steps to be calculated. Could also be a list with non-consecutive time steps
    time_steps = range(0, 672)
    print(time_steps)
    # 4. the output writer with the desired results to be stored to files.
    # The OutputWriter registers itself in net.output_writer on creation, so
    # it must not be passed to run_timeseries() again -- doing so raises the
    # "deprecated: output_writer should not be given" warning.
    create_output_writer(net, time_steps, output_dir)
    # 5. the main time series function
    run_timeseries(net, time_steps, run=pp.runpp_3ph, continue_on_divergence=True)
Load profile data is taken from the CSV files provided with the IEEE benchmark grid
def create_data_source():
    """Build the load profile DataFrame and wrap it in a DFData source.

    For every profile listed in the global ``loadshapes`` table, the
    active power profile is read from its CSV file (kW -> MW via 1e-3) and
    a matching reactive power column ``'<name>_Q'`` is derived from the
    load's power factor (Q = P * tan(arccos(PF))).

    Returns
    -------
    profiles : pd.DataFrame
        One P and one Q column per load profile.
    ds : DFData
        Data source consumed by the ConstControl controllers.
    """
    columns = {}
    for profile_name, file_name in loadshapes[['Name', 'File']].values:
        file_path = os.path.join(load_path, file_name)
        p_mw = pd.read_csv(file_path).mult.values * 1e-3
        columns[profile_name] = p_mw
        # power factor of the load assigned to this profile via the Yearly column
        pf = float(loads[loads.Yearly == profile_name].PF.values)
        columns[profile_name + '_Q'] = p_mw * np.tan(np.arccos(pf))
    # Build the frame in a single call instead of inserting columns one by
    # one -- repeated insertion fragments the DataFrame and triggered the
    # PerformanceWarning seen in the original run.
    profiles = pd.DataFrame(columns)
    ds = DFData(profiles)
    return profiles, ds
P and Q values are entered using the P and power factor (cos_phi) data
def create_controllers(net, ds):
    """Attach one P and one Q ConstControl per phase to the asymmetric loads.

    For each phase A/B/C, the loads assigned to that phase (global ``loads``
    table) get their ``p_<ph>_mw`` driven by the profile named in their
    ``Yearly`` column and their ``q_<ph>_mvar`` by the matching
    ``<Yearly>_Q`` profile from *ds*.

    Returns the net with the controllers registered.
    """
    # One loop replaces the six copy-pasted ConstControl calls; the call
    # sequence (p_a, q_a, p_b, q_b, p_c, q_c) is unchanged.
    for phase in ('A', 'B', 'C'):
        phase_loads = loads[loads['phases'] == phase]
        ph = phase.lower()
        ConstControl(net, element='asymmetric_load', variable=f'p_{ph}_mw',
                     element_index=phase_loads.index,
                     data_source=ds, profile_name=phase_loads.Yearly)
        ConstControl(net, element='asymmetric_load', variable=f'q_{ph}_mvar',
                     element_index=phase_loads.index,
                     data_source=ds, profile_name=phase_loads.Yearly + '_Q')
    return net
def create_output_writer(net, time_steps, output_dir):
    """Set up an OutputWriter that stores the time series results as .json.

    Logged variables:
      * per-phase P and Q on the LV side of every transformer
      * voltage magnitude and angle at one monitored bus per phase
        (bus 34 = Load 1 / phase A, bus 899 = Load 53 / phase B,
         bus 614 = Load 32 / phase C)
    """
    writer = OutputWriter(net, time_steps, output_path=output_dir,
                          output_file_type=".json")
    # transformer LV-side powers, one file per phase and quantity
    for template in ('p_{}_lv_mw', 'q_{}_lv_mvar'):
        for ph in ('a', 'b', 'c'):
            writer.log_variable('res_trafo_3ph', template.format(ph),
                                index=net.trafo.index)
    # one monitored load bus per phase
    monitored = (('a', 34), ('b', 899), ('c', 614))
    for template in ('vm_{}_pu', 'va_{}_degree'):
        for ph, bus in monitored:
            writer.log_variable('res_bus_3ph', template.format(ph), index=[bus])
    return writer
# Run the simulation; the result files land in the local temp folder.
output_dir = os.path.join(tempfile.gettempdir(), "time_series_example")
print("Results can be found in your local temp folder: {}".format(output_dir))
# makedirs(..., exist_ok=True) is race-free, unlike the exists()/mkdir() pair,
# and also creates missing parent directories.
os.makedirs(output_dir, exist_ok=True)
timeseries_example(output_dir)
Results can be found in your local temp folder: C:\Users\RBOLGA~1\AppData\Local\Temp\time_series_example
C:\Users\rbolgaryn\AppData\Local\Temp\ipykernel_27344\1411155948.py:5: PerformanceWarning: DataFrame is highly fragmented. This is usually the result of calling `frame.insert` many times, which has poor performance. Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()` profiles[loadprofile] = pd.read_csv(file_path).mult.values*1e-3 C:\Users\rbolgaryn\AppData\Local\Temp\ipykernel_27344\1411155948.py:6: PerformanceWarning: DataFrame is highly fragmented. This is usually the result of calling `frame.insert` many times, which has poor performance. Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()` profiles[loadprofile+'_Q'] = profiles[loadprofile] * np.tan( C:\Users\rbolgaryn\AppData\Local\Temp\ipykernel_27344\1411155948.py:5: PerformanceWarning: DataFrame is highly fragmented. This is usually the result of calling `frame.insert` many times, which has poor performance. Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()` profiles[loadprofile] = pd.read_csv(file_path).mult.values*1e-3 C:\Users\rbolgaryn\AppData\Local\Temp\ipykernel_27344\1411155948.py:6: PerformanceWarning: DataFrame is highly fragmented. This is usually the result of calling `frame.insert` many times, which has poor performance. Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()` profiles[loadprofile+'_Q'] = profiles[loadprofile] * np.tan( C:\Users\rbolgaryn\AppData\Local\Temp\ipykernel_27344\1411155948.py:5: PerformanceWarning: DataFrame is highly fragmented. This is usually the result of calling `frame.insert` many times, which has poor performance. Consider joining all columns at once using pd.concat(axis=1) instead. 
To get a de-fragmented frame, use `newframe = frame.copy()` profiles[loadprofile] = pd.read_csv(file_path).mult.values*1e-3 C:\Users\rbolgaryn\AppData\Local\Temp\ipykernel_27344\1411155948.py:6: PerformanceWarning: DataFrame is highly fragmented. This is usually the result of calling `frame.insert` many times, which has poor performance. Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()` profiles[loadprofile+'_Q'] = profiles[loadprofile] * np.tan( C:\Users\rbolgaryn\AppData\Local\Temp\ipykernel_27344\1411155948.py:5: PerformanceWarning: DataFrame is highly fragmented. This is usually the result of calling `frame.insert` many times, which has poor performance. Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()` profiles[loadprofile] = pd.read_csv(file_path).mult.values*1e-3 C:\Users\rbolgaryn\AppData\Local\Temp\ipykernel_27344\1411155948.py:6: PerformanceWarning: DataFrame is highly fragmented. This is usually the result of calling `frame.insert` many times, which has poor performance. Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()` profiles[loadprofile+'_Q'] = profiles[loadprofile] * np.tan( C:\Users\rbolgaryn\AppData\Local\Temp\ipykernel_27344\1411155948.py:5: PerformanceWarning: DataFrame is highly fragmented. This is usually the result of calling `frame.insert` many times, which has poor performance. Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()` profiles[loadprofile] = pd.read_csv(file_path).mult.values*1e-3 C:\Users\rbolgaryn\AppData\Local\Temp\ipykernel_27344\1411155948.py:6: PerformanceWarning: DataFrame is highly fragmented. This is usually the result of calling `frame.insert` many times, which has poor performance. 
Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()` profiles[loadprofile+'_Q'] = profiles[loadprofile] * np.tan( hp.pandapower.timeseries.run_time_series - WARNING: deprecated: output_writer should not be given to run_timeseries(). This overwrites the stored one in net.output_writer.
range(0, 672)
100%|████████████████████████████████████████████████████████████████████████████████| 672/672 [01:12<00:00, 9.30it/s]
A pandas dataframe is made from the output '.json' files. This dataframe will be used for plotting the results using matplotlib.
# Assemble the transformer-secondary power results written by the
# OutputWriter into a single DataFrame for plotting.
output_dir = os.path.join(tempfile.gettempdir(), "time_series_example")

def _trafo_result(filename):
    # Read one logged transformer quantity; the sign flip and the 1e3 factor
    # convert MW/Mvar flowing into the HV side to positive kW/kvar delivered
    # on the secondary.
    return pd.read_json(os.path.join(output_dir, 'res_trafo_3ph', filename)) * -1e3

# pandapower results: per-phase active power from the transformer secondary
df_pp = _trafo_result('p_a_lv_mw.json')
df_pp['PB'] = _trafo_result('p_b_lv_mw.json')
df_pp['PC'] = _trafo_result('p_c_lv_mw.json')
df_pp['P_SUM'] = df_pp.sum(axis=1)
df_pp = df_pp.rename(columns={0: 'PA'})
# reactive power, summed into the same frame
df_pq = _trafo_result('q_a_lv_mvar.json')
df_pq['QB'] = _trafo_result('q_b_lv_mvar.json')
df_pq['QC'] = _trafo_result('q_c_lv_mvar.json')
df_pq = df_pq.rename(columns={0: 'QA'})
df_pp['Q_SUM'] = df_pq.sum(axis=1)
# Voltage magnitudes at LOAD1 (phase A), LOAD53 (phase B) and LOAD32
# (phase C) over the one-day period, converted from p.u. to volts.
_v_base = 416 / np.sqrt(3)  # LV phase-to-neutral base voltage in volts

def _bus_voltage(filename):
    # read one logged per-bus voltage magnitude and scale p.u. -> volts
    return pd.read_json(os.path.join(output_dir, 'res_bus_3ph', filename)) * _v_base

# pandapower results
df_pp_v = _bus_voltage('vm_a_pu.json')
df_pp_v['VB'] = _bus_voltage('vm_b_pu.json')
df_pp_v['VC'] = _bus_voltage('vm_c_pu.json')
df_pp_v = df_pp_v.rename(columns={34: "VA"})
# json stores the time-step keys as strings, so the index sorts like
# 1, 10, 100, ... -- cast back to integers and sort numerically.
df_pp_v.index = df_pp_v.index.astype(np.int64)
df_pp_v = df_pp_v.sort_index()
df_pp.index = df_pp.index.astype(np.int64)
df_pp = df_pp.sort_index()
The following plot is generated from the results:
import matplotlib.pyplot as plt

# Plot the three monitored phase voltages over all time steps.
fig, axis = plt.subplots(1, 1)
df_pp_v.plot(kind='line', y=['VA', 'VB', 'VC'], ax=axis,
             color=['blue', 'orange', 'green'], figsize=(10, 5), legend=True)
axis.set_ylim((235, 255))
axis.xaxis.set_label_text("time step")
axis.yaxis.set_label_text("voltage mag. [Volts]")
axis.set_title("Load Voltages (pandapower) / V")
Text(0.5, 1.0, 'Load Voltages (pandapower) / V')
Voltage values are taken from three loads of each phase :
Load #1 Phase A
Load #53 Phase B
Load #32 Phase C