Skip to content

Commit

Permalink
Merge pull request #250 from openego/dev
Browse files Browse the repository at this point in the history
Release v0.1.4
  • Loading branch information
gplssm authored Jan 17, 2018
2 parents e265159 + b17681d commit eaaa5fe
Show file tree
Hide file tree
Showing 29 changed files with 1,423 additions and 418 deletions.
2 changes: 2 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,8 @@ in the [documentation](https://ding0.readthedocs.io)
A [set of examples](https://ding0.readthedocs.io/en/dev/usage_details.html#examples)
is provided to show how to use Ding0.

An overview on versions of data already generated [can be found in the wiki](https://github.com/openego/ding0/wiki/Ding0-datasets).

LICENSE
-------

Expand Down
2 changes: 2 additions & 0 deletions check_ding0_installation.sh
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,8 @@
# If you haven't installed docker yet, check out install_docker.sh
#
# Note: in this script all docker commands are executed as root (via sudo) as this is required on debian (solydX) systems
#
# Execute this script from its current location (same where setup.py lives)

# make ding0 install script and dependency install script executable
chmod +x install_ding0.sh
Expand Down
2 changes: 1 addition & 1 deletion ding0/config/config_calc.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
#operator_diff_round_digits: unit: -
operator_diff_round_digits = 3

#conn_diff_tolerance: init: -
#conn_diff_tolerance: unit: -
conn_diff_tolerance = 0.0001

#load_threshold: unit: kW
Expand Down
10 changes: 5 additions & 5 deletions ding0/config/config_db_tables.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -13,18 +13,18 @@ lv_load_areas = EgoDemandLoadarea
lv_grid_district = EgoGridLvGriddistrict
mv_stations = EgoGridHvmvSubstation
lv_stations = EgoGridMvlvSubstation
re_generators = EgoSupplyResPowerplant
conv_generators = EgoSupplyConvPowerplant
re_generators = t_ego_supply_res_powerplant_sq_mview
conv_generators = t_ego_supply_conv_powerplant_sq_mview

[versioned]
mv_grid_districts = EgoDpMvGriddistrict
lv_load_areas = EgoDpLoadarea
lv_grid_district = EgoDpLvGriddistrict
mv_stations = EgoDpHvmvSubstation
lv_stations = EgoDpMvlvSubstation
re_generators = EgoDpResPowerplant
conv_generators = EgoDpConvPowerplant
version = v0.2.10
re_generators = t_ego_dp_res_powerplant_sq_mview
conv_generators = t_ego_dp_conv_powerplant_sq_mview
version = v0.3.0

[input_data_source]
input_data = versioned
384 changes: 293 additions & 91 deletions ding0/core/__init__.py

Large diffs are not rendered by default.

9 changes: 9 additions & 0 deletions ding0/core/network/grids.py
Original file line number Diff line number Diff line change
Expand Up @@ -132,6 +132,15 @@ def add_cable_distributor(self, cable_dist):
self._cable_distributors.append(cable_dist)
self.graph_add_node(cable_dist)

def remove_cable_distributor(self, cable_dist):
    """Remove a cable distributor from this grid, if it is registered.

    Only acts when `cable_dist` is a known MV cable distributor of this
    grid (present in `cable_distributors()` and an instance of
    `MVCableDistributorDing0`); otherwise it is a no-op.
    """
    is_known = cable_dist in self.cable_distributors()
    if is_known and isinstance(cable_dist, MVCableDistributorDing0):
        # drop it from the internal list first, then from the graph
        self._cable_distributors.remove(cable_dist)
        if self._graph.has_node(cable_dist):
            self._graph.remove_node(cable_dist)

def add_ring(self, ring):
"""Adds a ring to _rings if not already existing"""
if ring not in self._rings and isinstance(ring, RingDing0):
Expand Down
27 changes: 13 additions & 14 deletions ding0/core/network/stations.py
Original file line number Diff line number Diff line change
Expand Up @@ -124,48 +124,47 @@ def select_transformers(self):
cum_peak_load = self.peak_load / cos_phi_load
cum_peak_generation = self.peak_generation(mode='MVLV') / cos_phi_feedin

kw2mw = 1e-3

# check if load or generation is greater respecting corresponding load factor
if (cum_peak_load / load_factor_mv_trans_lc_normal) > \
(cum_peak_generation / load_factor_mv_trans_fc_normal):
# use peak load and load factor from load case
load_factor_mv_trans = load_factor_mv_trans_lc_normal
residual_apparent_power = cum_peak_load * kw2mw
residual_apparent_power = cum_peak_load
else:
# use peak generation and load factor for feedin case
load_factor_mv_trans = load_factor_mv_trans_fc_normal
residual_apparent_power = cum_peak_generation * kw2mw
residual_apparent_power = cum_peak_generation

# determine number and size of required transformers

# get max. trafo
transformer_max = trafo_parameters.iloc[trafo_parameters['s_nom'].idxmax()]
transformer_max = trafo_parameters.iloc[trafo_parameters['S_nom'].idxmax()]

while residual_apparent_power > 0:
if residual_apparent_power > load_factor_mv_trans * transformer_max['s_nom']:
if residual_apparent_power > load_factor_mv_trans * transformer_max['S_nom']:
transformer = transformer_max
else:
# choose trafo
transformer = trafo_parameters.iloc[
trafo_parameters[trafo_parameters['s_nom'] * load_factor_mv_trans >
residual_apparent_power]['s_nom'].idxmin()]
trafo_parameters[trafo_parameters['S_nom'] * load_factor_mv_trans >
residual_apparent_power]['S_nom'].idxmin()]

# add transformer on determined size with according parameters
self.add_transformer(TransformerDing0(**{'grid': self.grid,
'v_level': self.grid.v_level,
's_max_longterm': transformer['s_nom']}))
's_max_longterm': transformer['S_nom']}))
# calc residual load
residual_apparent_power -= (load_factor_mv_trans *
transformer['s_nom'])
transformer['S_nom'])

# if no transformer was selected (no load in grid district), use smallest one
if len(self._transformers) == 0:
transformer = trafo_parameters.iloc[trafo_parameters['s_nom'].idxmin()]
transformer = trafo_parameters.iloc[trafo_parameters['S_nom'].idxmin()]

self.add_transformer(TransformerDing0(**{'grid': self.grid,
'v_level': self.grid.v_level,
's_max_longterm': transformer['s_nom']}))
self.add_transformer(
TransformerDing0(grid=self.grid,
v_level=self.grid.v_level,
s_max_longterm=transformer['S_nom']))

# add redundant transformer of the size of the largest transformer
s_max_max = max((o.s_max_a for o in self._transformers))
Expand Down
2 changes: 1 addition & 1 deletion ding0/data/equipment-parameters_LV_transformers.csv
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
s_nom,r,x
S_nom,R,X
#kVA,Ohm,Ohm
# Source 100..1000 kVar transformers: Torsten Werth; Netzberechnung mit Erzeugungsprofilen --
# Grundlagen, Berechnung, Anwendung; Springer; 2016
Expand Down
2 changes: 1 addition & 1 deletion ding0/data/equipment-parameters_MV_transformers.csv
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
s_nom,r,x
S_nom,R,X
#kVA,Ohm,Ohm
20000,0,0
32000,0,0
Expand Down
17 changes: 10 additions & 7 deletions ding0/examples/example_multiple_grid_districts.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,9 +19,11 @@
import time
import os
import pandas as pd
from sqlalchemy.orm import sessionmaker

from ding0.core import NetworkDing0
from ding0.tools import config as cfg_ding0, results, db
from ding0.tools import config as cfg_ding0, results
from egoio.tools import db
import json
from datetime import datetime

Expand Down Expand Up @@ -99,8 +101,9 @@ def run_multiple_grid_districts(mv_grid_districts, run_id, failsafe=False,
if base_path is None:
base_path = BASEPATH

# database connection
conn = db.connection(section='oedb')
# database connection/ session
engine = db.connection(section='oedb')
session = sessionmaker(bind=engine)()

corrupt_grid_districts = pd.DataFrame(columns=['id', 'message'])

Expand All @@ -113,15 +116,15 @@ def run_multiple_grid_districts(mv_grid_districts, run_id, failsafe=False,

if not failsafe:
# run DING0 on selected MV Grid District
msg = nd.run_ding0(conn=conn,
msg = nd.run_ding0(session=session,
mv_grid_districts_no=[mvgd])

# save results
results.save_nd_to_pickle(nd, os.path.join(base_path, run_id))
else:
# try to perform ding0 run on grid district
try:
msg = nd.run_ding0(conn=conn,
msg = nd.run_ding0(session=session,
mv_grid_districts_no=[mvgd])
# if not successful, put grid district to report
if msg:
Expand Down Expand Up @@ -166,7 +169,6 @@ def run_multiple_grid_districts(mv_grid_districts, run_id, failsafe=False,
index=False,
float_format='%.0f')

conn.close()
print('Elapsed time for', str(len(mv_grid_districts)),
'MV grid districts (seconds): {}'.format(time.time() - start))

Expand All @@ -175,7 +177,8 @@ def run_multiple_grid_districts(mv_grid_districts, run_id, failsafe=False,


if __name__ == '__main__':
base_path='/home/guido/mnt/rli-daten/Ding0/'
base_path = BASEPATH

# set run_id to current timestamp
run_id = datetime.now().strftime("%Y%m%d%H%M%S")

Expand Down
45 changes: 23 additions & 22 deletions ding0/examples/example_parallel_multiple_grid_districts.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,12 +21,14 @@
import itertools

from ding0.core import NetworkDing0
from ding0.tools import results, db
from ding0.tools import results
from egoio.tools import db

from math import floor
import multiprocessing as mp
import pandas as pd
import json
from sqlalchemy.orm import sessionmaker


BASEPATH = os.path.join(os.path.expanduser('~'), '.ding0')
Expand All @@ -37,7 +39,7 @@ def parallel_run(districts_list, n_of_processes, n_of_districts, run_id,
base_path=None):
'''Organize parallel runs of ding0.
The function take all districts in a list and divide them into
The function take all districts in a list and divide them into
n_of_processes parallel processes. For each process, the assigned districts
are given to the function process_runs() with the argument n_of_districts
Expand All @@ -59,7 +61,7 @@ def parallel_run(districts_list, n_of_processes, n_of_districts, run_id,
windows systems).
Specify your own, but keep in mind that it requires a particular
structure of subdirectories.
See Also
--------
ding0_runs
Expand Down Expand Up @@ -92,17 +94,17 @@ def parallel_run(districts_list, n_of_processes, n_of_districts, run_id,
mv_districts = th
processes.append(mp.Process(target=process_runs,
args=(mv_districts, n_of_districts,
output_info)))
output_info, run_id, base_path)))
#######################################################################
# Run processes
for p in processes:
p.start()
# Exit the completed processes
for p in processes:
p.join()
# Rescue output_info from processes
output = [output_info.get() for p in processes]
output = list(itertools.chain.from_iterable(output))
# Exit the completed processes
for p in processes:
p.join()

#######################################################################
print('Elapsed time for', str(max_dist),
Expand All @@ -111,9 +113,9 @@ def parallel_run(districts_list, n_of_processes, n_of_districts, run_id,
return output

########################################################
def process_runs(mv_districts, n_of_districts, output_info):
def process_runs(mv_districts, n_of_districts, output_info, run_id, base_path):
'''Runs a process organized by parallel_run()
The function take all districts mv_districts and divide them into clusters
of n_of_districts each. For each cluster, ding0 is run and the resulting
network is saved as a pickle
Expand All @@ -135,15 +137,17 @@ def process_runs(mv_districts, n_of_districts, output_info):
windows systems).
Specify your own, but keep in mind that it requires a particular
structure of subdirectories.
See Also
--------
parallel_run
'''
#######################################################################
# database connection
conn = db.connection(section='oedb')
# database connection/ session
engine = db.connection(section='oedb')
session = sessionmaker(bind=engine)()

#############################
clusters = [mv_districts[x:x + n_of_districts] for x in range(0, len(mv_districts), n_of_districts)]
output_clusters= []
Expand All @@ -158,11 +162,11 @@ def process_runs(mv_districts, n_of_districts, output_info):
nw_name = nw_name+'_to_'+str(cl[-1])
nw = NetworkDing0(name=nw_name)
try:
msg = nw.run_ding0(conn=conn, mv_grid_districts_no=cl)
msg = nw.run_ding0(session=session, mv_grid_districts_no=cl)
if msg:
status = 'run error'
else:
msg = 'OK'
msg = ''
status = 'OK'
results.save_nd_to_pickle(nw, os.path.join(base_path, run_id))
output_clusters.append((nw_name,status,msg, nw.metadata))
Expand All @@ -174,8 +178,7 @@ def process_runs(mv_districts, n_of_districts, output_info):


#######################################################################
#close connection and bye bye
conn.close()


def process_metadata(meta):
"""
Expand Down Expand Up @@ -206,27 +209,25 @@ def process_metadata(meta):
return metadata



if __name__ == '__main__':
# define individual base path
base_path = '/home/guido/mnt/rli-daten/Ding0/'
base_path = BASEPATH

# set run_id to current timestamp
run_id = datetime.now().strftime("%Y%m%d%H%M%S")


# run in parallel
mv_grid_districts = list(range(250, 254))
mv_grid_districts = list(range(1, 3609))
n_of_processes = mp.cpu_count() #number of parallel threads
n_of_districts = 1 #n° of districts in each serial cluster

out = parallel_run(mv_grid_districts, n_of_processes, n_of_districts,
run_id, base_path=base_path)

# report on unsuccessful runs
out_info = [_[0:3] for _ in out]
corrupt_out = [_[0:3] for _ in out if not _[1]=='OK']

corrupt_grid_districts = pd.DataFrame(out_info,
corrupt_grid_districts = pd.DataFrame(corrupt_out,
columns=['grid', 'status', 'message'])
corrupt_grid_districts.to_csv(
os.path.join(
Expand Down
15 changes: 7 additions & 8 deletions ding0/examples/example_single_grid_district.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,37 +18,36 @@
# ===== IMPORTS AND CONFIGURATION =====

# import DB interface
from ding0.tools import db
from egoio.tools import db

# import required modules of DING0
from ding0.core import NetworkDing0
from ding0.tools.logger import setup_logger
from ding0.tools.results import save_nd_to_pickle
import logging
from sqlalchemy.orm import sessionmaker

# define logger
logger = setup_logger()

# ===== MAIN =====

# database connection
conn = db.connection(section='oedb')
# database connection/ session
engine = db.connection(section='oedb')
session = sessionmaker(bind=engine)()

# instantiate new ding0 network object
nd = NetworkDing0(name='network')

# choose MV Grid Districts to import
mv_grid_districts = [3545]
mv_grid_districts = [3040]

# run DING0 on selected MV Grid District
nd.run_ding0(conn=conn,
nd.run_ding0(session=session,
mv_grid_districts_no=mv_grid_districts)

# export grids to database
# nd.export_mv_grid(conn, mv_grid_districts)
# nd.export_mv_grid_new(conn, mv_grid_districts)

conn.close()

# export grid to file (pickle)
save_nd_to_pickle(nd, filename='ding0_grids_example.pkl')
Loading

0 comments on commit eaaa5fe

Please sign in to comment.