|
1 | 1 | from django.core.management.base import BaseCommand, CommandError |
2 | | -from projects.models import ( |
3 | | - Asset, |
4 | | - AssetType, |
5 | | - TopologyNode, |
6 | | - Scenario, |
7 | | - Timeseries, |
8 | | - ConnectionLink, |
9 | | - Bus, |
10 | | -) |
11 | | -import pandas as pd |
| 2 | +from projects.models import Scenario |
12 | 3 | from pathlib import Path |
13 | | -import numpy as np |
14 | 4 | import shutil |
| 5 | +from oemof.datapackage.datapackage import building |
| 6 | +import datapackage as dp |
15 | 7 |
|
16 | 8 |
|
class Command(BaseCommand):
    """Management command that exports scenarios as oemof datapackages."""

    help = "Convert the given scenarios to datapackages"

    def add_arguments(self, parser):
        """Register command-line arguments.

        Args:
            parser: the argparse parser supplied by Django.
        """
        parser.add_argument("scen_id", nargs="+", type=int)
        # default="" so that omitting -o behaves the same as passing it
        # without a value (both fall back to the repository root below).
        # Without default="", options["outfile"] is None and Path(None)
        # raises TypeError.
        parser.add_argument(
            "-o", "--outfile", type=str, nargs="?", const="", default=""
        )

    def handle(self, *args, **options):
        """Export each requested scenario into a datapackage folder.

        Raises:
            CommandError: if one of the given scenario ids does not exist.
        """
        for scen_id in options["scen_id"]:
            try:
                scenario = Scenario.objects.get(pk=scen_id)
            except Scenario.DoesNotExist:
                raise CommandError('Scenario "%s" does not exist' % scen_id)

            destination_path = options["outfile"]
            if destination_path == "":
                # Default destination: repository root, four levels above
                # this management-command file.
                destination_path = Path(__file__).resolve().parents[4]
            else:
                destination_path = Path(destination_path)

            scenario_folder = destination_path / f"scenario_{scenario.name}".replace(
                " ", "_"
            )
            dp_json = scenario_folder / "datapackage.json"

            if dp_json.exists():
                # A datapackage was already exported for this scenario:
                # only re-infer its metadata instead of rebuilding it.
                # NOTE: this check must happen BEFORE any cleanup — the
                # previous version removed scenario_folder first, which
                # made this branch unreachable.
                print("Only inferring metadata")
                p = dp.Package(str(dp_json))
                building.infer_package_foreign_keys(p, fk_targets=["project"])
                p.descriptor["resources"].sort(key=lambda x: (x["path"], x["name"]))
                p.commit()
                p.save(dp_json)
            else:
                # Rebuild from scratch: drop any stale partial export first.
                if scenario_folder.exists():
                    shutil.rmtree(scenario_folder)
                print("Creating datapackage.json")
                scenario.to_datapackage(destination_path)
0 commit comments