The only thing that works is Docker. Beyond that I get errors I don't understand :(

maksim 2024-10-04 17:19:46 +04:00
parent b08617960e
commit 57b2836861
18 changed files with 2681 additions and 2 deletions

5
.env Normal file

@ -0,0 +1,5 @@
DATABASE=SuperService
POSTGRES_USER=UserSuperService
POSTGRES_PASSWORD=NotWarningWord1
CLICKHOUSE_USER=UserMyHouse
CLICKHOUSE_PASSWORD=NotWarningWord2


@ -1,4 +1,4 @@
# PIbd-42_SSPR # Project description
The refinement of the automated system for planning and running numerical simulations of fuel combustion in the burner devices of power plants is intended to optimize the running of numerical experiments with a digital model of a burner device, with the goal of finding the most economical and lowest-emission operating modes.
@ -7,4 +7,56 @@
2. Клюшенкова Ксения
3. Базунов Андрей
4. Жимолостнова Анна
5. Цуканова Ирина
# Running the project
## 1. Create a virtual environment
```
py -m venv .venv
```
## 2. Activate the environment
```
.\.venv\Scripts\activate
```
## 3. Install the dependencies
```
pip install -r .\requirements.txt
```
## 4. Create a .env file
Create the file and put the required parameters in it.
```
DATABASE=SuperService
POSTGRES_USER=UserSuperService
POSTGRES_PASSWORD=NotWarningWord1
CLICKHOUSE_USER=UserMyHouse
CLICKHOUSE_PASSWORD=NotWarningWord2
```
## 5. Start all the containers
```
docker-compose up --build
```
If necessary, the containers can be stopped:
```
docker-compose down
```
## 6. Run the project
```
python runner.py
```
## 7. Connect to ClickHouse
Command to enter the ClickHouse client:
```
docker exec -it clickhouse-db clickhouse-client -u UserMyHouse --password NotWarningWord2 --host localhost
```
Switch to the project database:
```
USE SuperService;
```
## 8. Connect to PostgreSQL
Command to enter the PostgreSQL client (psql):
```
docker exec -it postgres-db psql -U UserSuperService -d SuperService
```
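The same credentials can also be checked from Python; a minimal smoke-test sketch (not part of this commit; it assumes the containers from step 5 are running, the dependencies from `requirements.txt` are installed, and the `.env` file from step 4 is in the working directory):
```
# Hypothetical connectivity check; uses only clients already listed in requirements.txt.
import clickhouse_connect
import psycopg

from settings import settings  # reads the .env file created in step 4

# ClickHouse is exposed on HTTP port 8123 by docker-compose.yml.
ch = clickhouse_connect.get_client(host="localhost", port=8123,
                                   username=settings.CLICKHOUSE_USER,
                                   password=settings.CLICKHOUSE_PASSWORD)
print(ch.command("SELECT 1"))  # expected: 1

# PostgreSQL is exposed on port 5432.
with psycopg.connect(dbname=settings.DATABASE, user=settings.POSTGRES_USER,
                     password=settings.POSTGRES_PASSWORD,
                     host="localhost", port="5432") as conn:
    print(conn.execute("SELECT version()").fetchone()[0])
```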

109
clickhouse_tools.py Normal file

@ -0,0 +1,109 @@
import time
import pandas as pd
import clickhouse_connect
class ClickHouseClient:
def __init__(self, host='localhost', port=9000, database="default", username="", password=""):
self.host = host
self.port = port
self.database = database
self.username = username
self.password = password
self.client = self.connect_clickhouse()
self.initialize_table()
def connect_clickhouse(self):
"""
Creates a connection to the ClickHouse database.
"""
return clickhouse_connect.get_client(host=self.host, port=self.port, database=self.database, username=self.username, password=self.password)
def initialize_table(self):
"""
Initializes the table in ClickHouse if it does not already exist.
"""
create_table_query = """
CREATE TABLE IF NOT EXISTS experiment_data (
volume Float64,
nitrogen_oxide_emission Float64,
temperature Float64,
co_fraction Float64,
co2_fraction Float64,
x Float64,
y Float64,
z Float64,
file_id String
) ENGINE = MergeTree()
ORDER BY file_id
"""
self.client.command(create_table_query)
def get_data(self):
"""
Fetches maximum and volume-weighted average values from ClickHouse, including the max(x) - min(x) range over cells with temperature > 1150.
:return: DataFrame with the data from ClickHouse.
"""
query = """
SELECT
file_id,
MAX(temperature) AS max_temperature,
MAX(nitrogen_oxide_emission) AS max_nox,
MAX(co_fraction) AS max_co,
MAX(co2_fraction) AS max_co2,
SUM(volume * temperature) / SUM(volume) AS weighted_avg_temperature,
SUM(volume * nitrogen_oxide_emission) / SUM(volume) AS weighted_avg_nox,
SUM(volume * co_fraction) / SUM(volume) AS weighted_avg_co,
SUM(volume * co2_fraction) / SUM(volume) AS weighted_avg_co2,
MAX(if(temperature > 1150, x, NULL)) - MIN(if(temperature > 1150, x, NULL)) AS x_range_high_temp,
MAX(if(temperature > 1150, y, NULL)) - MIN(if(temperature > 1150, y, NULL)) AS y_range_high_temp,
MAX(if(temperature > 1150, z, NULL)) - MIN(if(temperature > 1150, z, NULL)) AS z_range_high_temp,
SUM(if(temperature > 1150, volume, NULL)) AS flame_volume
FROM
experiment_data
GROUP BY
file_id
"""
results = self.client.query(query)
columns = ["file_id", "max_temperature", "max_nox", "max_co", "max_co2",
"weighted_avg_temperature", "weighted_avg_nox", "weighted_avg_co", "weighted_avg_co2",
"x_range_high_temp", "y_range_high_temp", "z_range_high_temp", "flame_volume"]
return pd.DataFrame(results.result_rows, columns=columns)
def save_csv_to_clickhouse(self, csv_path, file_id):
"""
Loads data from a CSV file into ClickHouse.
:param csv_path: Path to the CSV file.
:param file_id: File identifier.
"""
# Read the data from the CSV file
df = pd.read_csv(csv_path, delimiter=';', decimal='.')
# Rename the columns
rename_dict = {
"Volume (m^3)": "volume",
"Mass Fraction of Nitrogen Oxide Emission": "nitrogen_oxide_emission",
"Temperature (K)": "temperature",
"Mass Fraction of CO": "co_fraction",
"Mass Fraction of CO2": "co2_fraction",
"X (m)": "x",
"Y (m)": "y",
"Z (m)": "z"
}
df.rename(columns=rename_dict, inplace=True)
df['x'] = abs(df['x'])
df['x'] = df['x'] - df['x'].min()
df['y'] = df['y'] - df['y'].min()
df['z'] = df['z'] - df['z'].min()
# Add the file identifier column
df["file_id"] = file_id
# Delete any existing records for this file
delete_query = "ALTER TABLE experiment_data DELETE WHERE file_id = %(file_id)s"
self.client.command(delete_query, parameters={'file_id': file_id})
# Insert the data into ClickHouse
self.client.insert_df('experiment_data', df)
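For illustration, a minimal usage sketch of `ClickHouseClient` (not part of the file above; it mirrors how `test_db.py` and `data_models.py` call the class — the CSV path and `file_id` here are placeholders):
```
from clickhouse_tools import ClickHouseClient
from settings import settings

client = ClickHouseClient("localhost", 8123, database=settings.DATABASE,
                          username=settings.CLICKHOUSE_USER,
                          password=settings.CLICKHOUSE_PASSWORD)

# Load one results table, then read back the aggregated metrics.
client.save_csv_to_clickhouse("data_table.csv", file_id="placeholder-id")
df = client.get_data()
print(df[["file_id", "max_temperature", "flame_volume"]])
```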

26
config.yaml Normal file

@ -0,0 +1,26 @@
paths:
  starccm: '/home/user/Siemens/19.02.009-R8/STAR-CCM+19.02.009-R8/star/bin/starccm+'
  chemkin: '/media/user/Projects/burner_data/chemkin'
  main: '/media/user/Data/experiment_data'
parameters:
  number_processes: 16
  mesh_base_size: 0.7
  stopping_criterion: 7000
  diameters:
    d1: 1442
    d2: 1016
    d3: 640
    d4: 325
    d5: 325
    d6: 245
  default_values:
    N1: 24
    L1: 70.0
    a1: 60.0
    N2: 18
    L2: 105.0
    N3: 9
    L3: 29.6
api_url: "http://10.6.23.120:8000/api/v1/generate_geom"
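For reference, the planner scripts below read this file with `yaml.safe_load`; a short sketch of the access pattern used in `new_experiment_planner.py` and `runner_db.py`:
```
import yaml

with open('config.yaml', 'r') as config_file:
    config = yaml.safe_load(config_file)

STARCCM_PATH = config['paths']['starccm']
DIAMETERS = config['parameters']['diameters']            # {'d1': 1442, ...}
DEFAULT_VALUES = config['parameters']['default_values']  # {'N1': 24, ...}
API_URL = config['api_url']
```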

57
data_models.py Normal file

@ -0,0 +1,57 @@
import matplotlib.pyplot as mp
import pandas as pd
import seaborn as sb
from settings import settings
from clickhouse_tools import ClickHouseClient
from postgres_tools import PostgresClient
class DataPreparer:
def __init__(self, clickhouse_host='localhost', postgres_host='localhost', postgres_db='your_db',
postgres_user='your_user', postgres_password='your_password'):
self.clickhouse_client = ClickHouseClient("localhost", 8123, database=settings.DATABASE, username=settings.CLICKHOUSE_USER, password=settings.CLICKHOUSE_PASSWORD)
self.postgres_client = PostgresClient(
dbname=settings.DATABASE,
user=settings.POSTGRES_USER,
password=settings.POSTGRES_PASSWORD,
host="localhost",
port="5432"
)
def prepare_ml_dataset(self):
"""
Prepares a dataset for machine learning by combining data from ClickHouse and PostgreSQL.
:return: DataFrame with the prepared data.
"""
# clickhouse_data = self.clickhouse_client.get_data()
postgres_data = self.postgres_client.get_experiments()
result_data = self.postgres_client.get_data()
# Merge the data on file_id
ml_dataset = pd.merge(postgres_data, result_data, on='file_id')
self.postgres_client.close()
return ml_dataset
data_preparer = DataPreparer()
# Prepare the dataset for machine learning
ml_dataset = data_preparer.prepare_ml_dataset()
ml_dataset = ml_dataset.drop('file_id', axis=1)
ml_dataset.to_csv('burner_data_pg_2.csv', index=False)
# Find columns with a single unique value
cols_to_drop = ml_dataset.columns[ml_dataset.nunique() == 1]
# Drop these columns
ml_dataset = ml_dataset.drop(columns=cols_to_drop)
fig, ax = mp.subplots(figsize=(40, 40))
dataplot = sb.heatmap(ml_dataset.corr(), cmap="YlGnBu", annot=True)
# displaying heatmap
mp.show()

30
docker-compose.yml Normal file

@ -0,0 +1,30 @@
version: '3.8'
services:
  db:
    image: postgres
    container_name: postgres-db
    environment:
      POSTGRES_DB: ${DATABASE}
      POSTGRES_USER: ${POSTGRES_USER}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
    ports:
      - "5432:5432"
    volumes:
      - postgres_data:/var/lib/postgresql/data
  clickhouse:
    image: clickhouse/clickhouse-server:latest
    container_name: clickhouse-db
    environment:
      CLICKHOUSE_USER: ${CLICKHOUSE_USER}
      CLICKHOUSE_PASSWORD: ${CLICKHOUSE_PASSWORD}
    ports:
      - "8123:8123"
      - "9000:9000"
    volumes:
      - clickhouse_data:/var/lib/clickhouse
volumes:
  postgres_data:
  clickhouse_data:

213
experiment_planner.py Normal file

@ -0,0 +1,213 @@
import requests
import os
import subprocess
import time
import psutil
import argparse
import macros_generator as mg
STARCCM_PATH = '/home/user/Siemens/19.02.009-R8/STAR-CCM+19.02.009-R8/star/bin/starccm+'
NUMBER_PROCESSES = 16
def download_file_from_fastapi(api_url, params, full_file_name):
response = requests.post(api_url, json=params)
if response.status_code == 200:
with open(full_file_name, "wb") as f:
f.write(response.content)
print("File downloaded successfully.")
else:
print(f"Failed to download file. Status code: {response.status_code}")
def terminate_process_by_name(process_name):
# Iterate over all processes in the system
for proc in psutil.process_iter(['pid', 'name']):
try:
# If the process name matches the one we are looking for
if proc.info['name'] == process_name:
# Terminate the process
proc.terminate()
print(f"Process '{process_name}' with PID {proc.pid} was terminated.")
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess) as e:
print(f"Failed to terminate process '{process_name}': {e}")
def create_directory(path):
if not os.path.exists(path):
os.makedirs(path)
def delete_directory(path):
if os.path.exists(path):
os.remove(path)
def run_macros(macros_name, model_name=None, new_model=False, is_gpgpu=False):
np_value = '1 -gpgpu auto' if is_gpgpu else str(NUMBER_PROCESSES)
if new_model:
macros_command = f'{STARCCM_PATH} -np {np_value} -new -batch \'{macros_name}\''
else:
if model_name is None:
raise ValueError("model_name must be provided if new_model is False")
macros_command = f'{STARCCM_PATH} -np {np_value} \'{model_name}\' -batch \'{macros_name}\''
subprocess.run(
["bash", "-c", macros_command])
def run_experiment(angle):
chemkin_path = "/media/user/Новый том/burner_automation/burner_data/chemkin"
mesh_base_size = 0.7
cpu_stopping_criterion = 200
stopping_criterion = 7000
main_path = '/media/user/Новый том/burner_automation/experiment_data'
create_directory(main_path)
diameters = {'d1': 1442, 'd2': 1016, 'd3': 640, 'd4': 325, 'd5': 325, 'd6': 245}
default_values = {
"N1": 24,
"L1": 70.0,
"a1": 60.0,
"N2": 18,
"L2": 105.0,
"N3": 9,
"L3": 30.0
}
# number_outer_blades = [12, 24, 36, 48]
#number_outer_blades = 24.0
# length_outer_blades = [44, 70, 86, 107.5]
#length_outer_blades = 70.0
# angle_outer_blades = [30.0, 45.0, 60.0, 75.0]
# number_middle_blades = [9, 18, 27, 36]
#number_middle_blades = 18.0
# load = [190, 260, 315, 400, 465]
# load_eco = [260, 315, 400, 465]
load = 450
# recycling_eco = [0, 7, 14, 21, 28]
# recycling = [0, 5, 10, 15, 20, 25, 30]
# recycling = [0, 6, 12, 18, 24, 30]
recycling = 0
start_time = time.time()
directories_list = []
api_url = "http://10.6.23.120:8000/api/v1/generate_geom"
params = {
"a1": angle
}
geometry_path = str(24) + '_' + str(70) + '_' + str(angle) + '_' + str(18)
geometry_path_full = os.path.join(main_path, geometry_path, 'geometry')
create_directory(geometry_path_full)
geometry_file_name = os.path.join(geometry_path_full, 'geometry.stp')
download_file_from_fastapi(api_url, params, geometry_file_name)
macros_path = os.path.join(main_path, geometry_path, 'general_macros')
directories_list.append(macros_path)
MODEL_PATH = os.path.join(main_path, geometry_path, 'model')
directories_list.append(MODEL_PATH)
model_parameters = {
'geometry_path': geometry_file_name,
'chemkin_path': chemkin_path,
'init_model_folder': MODEL_PATH,
'bladeCount': 18,
'mesh_base_size': mesh_base_size
}
fuel_parameters = mg.load_calculation(load, diameters)
recycling_parameters = mg.recycling_calculation(fuel_parameters['alpha'], fuel_parameters['gas_consumption'],
fuel_parameters['air_consumption'], recycling)
experiments_path = os.path.join(main_path, geometry_path, 'experiments')
directories_list.append(experiments_path)
load_path = os.path.join(experiments_path, str(load))
directories_list.append(load_path)
load_macros_experiment_path = os.path.join(load_path, 'macros')
directories_list.append(load_macros_experiment_path)
load_model_experiment_path = os.path.join(load_path, 'model')
directories_list.append(load_model_experiment_path)
recycling_experiment_path = os.path.join(load_path, str(recycling))
directories_list.append(recycling_experiment_path)
recycling_macros_experiment_path = os.path.join(recycling_experiment_path, 'macros')
directories_list.append(recycling_macros_experiment_path)
solver_parameters = {
'experiment_path': recycling_experiment_path,
'stopping_criterion': stopping_criterion
}
prc_macros_file = os.path.join(macros_path, 'preprocess_macro.java')
fuel_macros_file = os.path.join(load_macros_experiment_path, 'fuel_macro.java')
rec_macros_file = os.path.join(recycling_macros_experiment_path, 'recycle_macro.java')
run_macros_file = os.path.join(recycling_macros_experiment_path, 'run_macros.java')
for directory in directories_list:
create_directory(directory)
model_file = os.path.join(MODEL_PATH, 'init_model.sim')
mg.preprocessor_settings(prc_macros_file, model_parameters, model_file)
load_model_file = os.path.join(load_model_experiment_path, "load_"+str(load)+".sim")
mg.fuel_settings(fuel_macros_file, fuel_parameters, load_model_file)
exp_file = os.path.join(recycling_experiment_path, "recycling_"+str(recycling)+".sim")
mg.fgm_table_settings(rec_macros_file, recycling_parameters, exp_file)
mg.setting_and_running_solver(run_macros_file, solver_parameters, exp_file)
run_macros(prc_macros_file, new_model=True)
run_macros(fuel_macros_file, model_file)
run_macros(rec_macros_file, load_model_file)
run_macros(run_macros_file, exp_file, is_gpgpu=True)
_EXP_FILE = exp_file + "~"
delete_directory(_EXP_FILE)
# solver_parameters['stopping_criterion'] = STOPPING_CRITERION
# mg.setting_and_running_solver(run_macros_file, solver_parameters, EXP_FILE)
# run_macros(run_macros_file, EXP_FILE, is_gpgpu=True)
# _EXP_FILE = EXP_FILE + "~"
# delete_directory(_EXP_FILE)
# End of the timing measurement
end_time = time.time()
# Compute the execution time
execution_time = end_time - start_time
print(f"Execution time: {execution_time}")
# time.sleep(10)
#
# processes_name = ["starccm+", "star-ccm+"]
#
# for process in processes_name:
# terminate_process_by_name(process)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run experiments with the ГМУ-45 power unit")
parser.add_argument("angle", type=str, help="Tilt angle of the blades in the outer contour")
args = parser.parse_args()
run_experiment(args.angle)

74
insert_to_db.py Normal file

@ -0,0 +1,74 @@
from pathlib import Path
import yaml
from settings import settings
from clickhouse_tools import ClickHouseClient
import utils
from postgres_tools import PostgresClient
# Load the configuration from config.yaml
with open('config.yaml', 'r') as config_file:
config = yaml.safe_load(config_file)
MAIN_PATH = config['paths']['main']
main_path = Path(MAIN_PATH)
def add_data_to_db(experiment_parameters, load_parameters, recycling_parameters):
geometry_path = (f"{experiment_parameters['outer_blades_count']}_{experiment_parameters['outer_blades_length']}_"
f"{experiment_parameters['outer_blades_angle']}_{experiment_parameters['middle_blades_count']}")
experiments_path = main_path / geometry_path / 'experiments'
load_path = experiments_path / str(experiment_parameters['load'])
load_parameters_path = load_path / 'parameters'
recycling_path = load_path / str(experiment_parameters['recycling'])
load_parameters_file = load_parameters_path / f"load_{experiment_parameters['load']}_parameters.yaml"
plot_csv = recycling_path / 'plot.csv'
table_csv = recycling_path / 'data_table.csv'
file_id = utils.calculate_hash(experiment_parameters)
clickhouse_client = ClickHouseClient("localhost", 8123, settings.DATABASE, settings.CLICKHOUSE_USER, settings.CLICKHOUSE_PASSWORD)
# Initialize the database
db = PostgresClient(
dbname=settings.DATABASE,
user=settings.POSTGRES_USER,
password=settings.POSTGRES_PASSWORD,
host="localhost",
port="5432"
)
try:
if load_parameters_file.exists():
with open(load_parameters_file, 'r') as fuel_dict_file:
fuel_parameters = yaml.safe_load(fuel_dict_file)
load_parameters['primary_air_consumption'] = fuel_parameters['primary_air_consumption']
load_parameters['secondary_air_consumption'] = fuel_parameters['secondary_air_consumption']
load_parameters['gas_inlet_consumption'] = fuel_parameters['gas_inlet_consumption']
# Insert into load_parameters and get the id
load_id = db.insert_load_parameters(load_parameters)
# Insert into recycling_parameters and get the id
recycling_id = db.insert_recycling_parameters(recycling_parameters, load_id)
# Insert into experiment_parameters
db.insert_experiment_parameters(experiment_parameters, load_id, recycling_id, file_id)
# Commit the changes
db.connection.commit()
db.save_csv_to_postgres(plot_csv, file_id)
clickhouse_client.save_csv_to_clickhouse(table_csv, file_id)
print('Loaded: ', experiment_parameters)
finally:
db.close()

1339
macros_generator.py Normal file

File diff suppressed because it is too large

224
new_experiment_planner.py Normal file

@ -0,0 +1,224 @@
import requests
import subprocess
import argparse
import yaml
import psutil
import time
import macros_generator as mg
from settings import settings
from clickhouse_tools import ClickHouseClient
import utils
from postgres_tools import PostgresClient
from pathlib import Path
from contextlib import contextmanager
# Load the configuration from config.yaml
with open('config.yaml', 'r') as config_file:
config = yaml.safe_load(config_file)
STARCCM_PATH = config['paths']['starccm']
CHEMKIN_PATH = config['paths']['chemkin']
MAIN_PATH = config['paths']['main']
NUMBER_PROCESSES = config['parameters']['number_processes']
MESH_BASE_SIZE = config['parameters']['mesh_base_size']
STOPPING_CRITERION = config['parameters']['stopping_criterion']
DIAMETERS = config['parameters']['diameters']
DEFAULT_VALUES = config['parameters']['default_values']
API_URL = config['api_url']
def download_file_from_fastapi(api_url, params, full_file_name):
try:
response = requests.post(api_url, json=params)
response.raise_for_status()
with open(full_file_name, "wb") as f:
f.write(response.content)
print("File downloaded successfully.")
except requests.RequestException as e:
print(f"Failed to download file: {e}")
def terminate_process_by_name(process_name):
for proc in psutil.process_iter(['pid', 'name']):
try:
if proc.info['name'] == process_name:
proc.terminate()
print(f"Process '{process_name}' with PID {proc.pid} was terminated.")
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess) as e:
print(f"Failed to terminate process '{process_name}': {e}")
def create_directory(path):
Path(path).mkdir(parents=True, exist_ok=True)
@contextmanager
def remove_file_on_exit(file_path):
try:
yield
finally:
if file_path.exists():
file_path.unlink()
def run_macros(macros_name, model_name=None, new_model=False, is_gpgpu=False):
np_value = '1 -gpgpu auto' if is_gpgpu else str(NUMBER_PROCESSES)
if new_model:
macros_command = f'{STARCCM_PATH} -np {np_value} -new -batch \'{macros_name}\''
else:
if model_name is None:
raise ValueError("model_name must be provided if new_model is False")
macros_command = f'{STARCCM_PATH} -np {np_value} \'{model_name}\' -batch \'{macros_name}\''
subprocess.run(["bash", "-c", macros_command], check=True)
def run_experiment(outer_blades_count, outer_blades_length, outer_blades_angle, middle_blades_count, load, recycling):
main_path = Path(MAIN_PATH)
create_directory(main_path)
geometry_path = f"{outer_blades_count}_{outer_blades_length}_{outer_blades_angle}_{middle_blades_count}"
geometry_path_full = main_path / geometry_path / 'geometry'
create_directory(geometry_path_full)
geometry_file_name = geometry_path_full / 'geometry.stp'
general_macros_path = main_path / geometry_path / 'general_macros'
create_directory(general_macros_path)
model_path = main_path / geometry_path / 'model'
create_directory(model_path)
model_file = model_path / 'init_model.sim'
experiments_path = main_path / geometry_path / 'experiments'
load_path = experiments_path / str(load)
load_parameters_path = load_path / 'parameters'
load_macros_path = load_path / 'macros'
load_model_path = load_path / 'model'
recycling_path = load_path / str(recycling)
recycling_macros_path = recycling_path / 'macros'
for directory in [experiments_path, load_path, load_parameters_path, load_macros_path, load_model_path,
recycling_path, recycling_macros_path]:
create_directory(directory)
load_parameters_file = load_parameters_path / f"load_{load}_parameters.yaml"
load_model_file = load_model_path / f"load_{load}.sim"
exp_file = recycling_path / f"recycling_{recycling}.sim"
# Check whether init_model.sim already exists
if not model_file.exists():
download_file_from_fastapi(API_URL, {"N1": outer_blades_count,
"L1": outer_blades_length,
"a1": outer_blades_angle,
"N2": middle_blades_count},
geometry_file_name)
prc_macros_file = general_macros_path / 'preprocess_macro.java'
model_parameters = {
'geometry_path': geometry_file_name,
'chemkin_path': CHEMKIN_PATH,
'init_model_folder': model_path,
'bladeCount': middle_blades_count,
'mesh_base_size': MESH_BASE_SIZE
}
mg.preprocessor_settings(prc_macros_file, model_parameters, model_file)
run_macros(prc_macros_file, new_model=True)
if not load_parameters_file.exists():
fuel_parameters = mg.load_calculation(float(load), DIAMETERS)
with open(load_parameters_file, 'w') as fuel_dict_file:
yaml.dump(fuel_parameters, fuel_dict_file, default_flow_style=False, allow_unicode=True)
else:
with open(load_parameters_file, 'r') as fuel_dict_file:
fuel_parameters = yaml.safe_load(fuel_dict_file)
# Check whether load_{load}.sim already exists
if not load_model_file.exists():
fuel_macros_file = load_macros_path / 'fuel_macro.java'
mg.fuel_settings(fuel_macros_file, fuel_parameters, load_model_file)
run_macros(fuel_macros_file, model_file)
# Check whether recycling_{recycling}.sim already exists
if not exp_file.exists():
rec_macros_file = recycling_macros_path / 'recycle_macro.java'
run_macros_file = recycling_macros_path / 'run_macros.java'
recycling_parameters = mg.recycling_calculation(
fuel_parameters['alpha'], fuel_parameters['gas_consumption'], fuel_parameters['air_consumption'],
float(recycling))
solver_parameters = {
'experiment_path': recycling_path,
'stopping_criterion': STOPPING_CRITERION
}
mg.fgm_table_settings(rec_macros_file, recycling_parameters, exp_file)
mg.setting_and_running_solver(run_macros_file, solver_parameters, exp_file)
run_macros(rec_macros_file, load_model_file)
run_macros(run_macros_file, exp_file, is_gpgpu=True)
experiment_parameters = {
'outer_blades_count': int(float(outer_blades_count)),
'outer_blades_length': outer_blades_length,
'outer_blades_angle': outer_blades_angle,
'middle_blades_count': int(float(middle_blades_count)),
'load': float(load),
'recycling': float(recycling),
}
fields_to_select = ['primary_air_consumption', 'secondary_air_consumption', 'gas_inlet_consumption']
load_parameters = {key: fuel_parameters[key] for key in fields_to_select}
load_parameters['load'] = float(load)
recycling_parameters['load'] = float(load)
recycling_parameters['recycling_level'] = float(recycling)
plot_csv = recycling_path / 'plot.csv'
table_csv = recycling_path / 'data_table.csv'
clickhouse_client = ClickHouseClient("localhost", 8123, settings.DATABASE, settings.CLICKHOUSE_USER,
settings.CLICKHOUSE_PASSWORD)
db = PostgresClient(
dbname=settings.DATABASE,
user=settings.POSTGRES_USER,
password=settings.POSTGRES_PASSWORD,
host="localhost",
port="5432"
)
file_id = utils.calculate_hash(experiment_parameters)
try:
clickhouse_client.save_csv_to_clickhouse(table_csv, file_id)
print("Clickhouse saved successfully")
load_id = db.insert_load_parameters(load_parameters)
recycling_id = db.insert_recycling_parameters(recycling_parameters, load_id)
db.insert_experiment_parameters(experiment_parameters, load_id, recycling_id, file_id)
db.connection.commit()
db.save_csv_to_postgres(plot_csv, file_id)
print("Postgres saved successfully")
finally:
db.close()
with remove_file_on_exit(exp_file.with_suffix(".sim~")):
pass
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run experiments with the ГМУ-45 power unit")
parser.add_argument("outer_blades_count", type=str, help="Number of blades in the outer contour")
parser.add_argument("outer_blades_length", type=str, help="Width of the blades in the outer contour")
parser.add_argument("outer_blades_angle", type=str, help="Tilt angle of the blades in the outer contour")
parser.add_argument("middle_blades_count", type=str, help="Number of blades in the middle contour")
parser.add_argument("load", type=str, help="Steam load")
parser.add_argument("recycling", type=str, help="Flue gas recirculation level")
args = parser.parse_args()
run_experiment(args.outer_blades_count, args.outer_blades_length, args.outer_blades_angle, args.middle_blades_count,
args.load, args.recycling)

375
postgres_tools.py Normal file

@ -0,0 +1,375 @@
import psycopg
import pandas as pd
class PostgresClient:
def __init__(self, dbname, user, password, host, port):
self.connection = psycopg.connect(
dbname=dbname,
user=user,
password=password,
host=host,
port=port
)
self.init_db()
def init_db(self):
with self.connection.cursor() as cur:
cur.execute("""
CREATE TABLE IF NOT EXISTS load_parameters (
id SERIAL PRIMARY KEY,
load NUMERIC NOT NULL UNIQUE,
primary_air_consumption NUMERIC NOT NULL,
secondary_air_consumption NUMERIC NOT NULL,
gas_inlet_consumption NUMERIC NOT NULL
);
CREATE TABLE IF NOT EXISTS recycling_parameters (
id SERIAL PRIMARY KEY,
load_id INTEGER NOT NULL,
recycling_level NUMERIC NOT NULL,
CO2 NUMERIC NOT NULL,
N2 NUMERIC NOT NULL,
H2O NUMERIC NOT NULL,
O2 NUMERIC NOT NULL,
UNIQUE(load_id, recycling_level),
FOREIGN KEY (load_id) REFERENCES load_parameters(id) ON DELETE CASCADE
);
CREATE TABLE IF NOT EXISTS experiment_parameters (
id SERIAL PRIMARY KEY,
outer_blades_count INTEGER NOT NULL,
outer_blades_length NUMERIC NOT NULL,
outer_blades_angle NUMERIC NOT NULL,
middle_blades_count INTEGER NOT NULL,
load_id INTEGER NOT NULL,
recycling_id INTEGER NOT NULL,
experiment_hash CHAR(64) NOT NULL UNIQUE,
FOREIGN KEY (load_id) REFERENCES load_parameters(id) ON DELETE CASCADE,
FOREIGN KEY (recycling_id) REFERENCES recycling_parameters(id) ON DELETE CASCADE
);
CREATE TABLE IF NOT EXISTS experiment_data (
id BIGSERIAL PRIMARY KEY,
Direction DOUBLE PRECISION,
Temperature DOUBLE PRECISION,
NOx DOUBLE PRECISION,
CO2 DOUBLE PRECISION,
CO DOUBLE PRECISION,
file_id CHAR(64) NOT NULL
);
""")
self.connection.commit()
def insert_load_parameters(self, load_parameters):
with self.connection.cursor() as cur:
cur.execute("SELECT id FROM load_parameters WHERE load = %s", (load_parameters['load'],))
load_id = cur.fetchone()
if load_id is None:
cur.execute("""
INSERT INTO load_parameters (load, primary_air_consumption, secondary_air_consumption, gas_inlet_consumption)
VALUES (%s, %s, %s, %s)
RETURNING id;
""", (load_parameters['load'], load_parameters['primary_air_consumption'],
load_parameters['secondary_air_consumption'], load_parameters['gas_inlet_consumption']))
load_id = cur.fetchone()[0]
else:
load_id = load_id[0]
self.connection.commit()
return load_id
def insert_recycling_parameters(self, recycling_parameters, load_id):
with self.connection.cursor() as cur:
cur.execute("SELECT id FROM recycling_parameters WHERE load_id = %s AND recycling_level = %s",
(load_id, recycling_parameters['recycling_level']))
recycling_id = cur.fetchone()
if recycling_id is None:
cur.execute("""
INSERT INTO recycling_parameters (load_id, recycling_level, CO2, N2, H2O, O2)
VALUES (%s, %s, %s, %s, %s, %s)
RETURNING id;
""", (load_id, recycling_parameters['recycling_level'], recycling_parameters['CO2'],
recycling_parameters['N2'], recycling_parameters['H2O'], recycling_parameters['O2']))
recycling_id = cur.fetchone()[0]
else:
recycling_id = recycling_id[0]
self.connection.commit()
return recycling_id
def insert_experiment_parameters(self, experiment_parameters, load_id, recycling_id, file_id):
with self.connection.cursor() as cur:
cur.execute("SELECT id FROM experiment_parameters WHERE experiment_hash = %s", (file_id,))
experiment_id = cur.fetchone()
if experiment_id is None:
cur.execute("""
INSERT INTO experiment_parameters (outer_blades_count, outer_blades_length, outer_blades_angle, middle_blades_count, load_id, recycling_id, experiment_hash)
VALUES (%s, %s, %s, %s, %s, %s, %s);
""", (experiment_parameters['outer_blades_count'], experiment_parameters['outer_blades_length'],
experiment_parameters['outer_blades_angle'], experiment_parameters['middle_blades_count'], load_id,
recycling_id, file_id))
self.connection.commit()
def get_load_parameters(self, load):
with self.connection.cursor() as cur:
cur.execute("SELECT * FROM load_parameters WHERE load = %s", (load,))
row = cur.fetchone()
if row:
return {
'load': row[1],
'primary_air_consumption': row[2],
'secondary_air_consumption': row[3],
'gas_inlet_consumption': row[4]
}
return None
def get_recycling_parameters(self, load, recycling_level):
with self.connection.cursor() as cur:
cur.execute("""
SELECT rp.* FROM recycling_parameters rp
JOIN load_parameters lp ON rp.load_id = lp.id
WHERE lp.load = %s AND rp.recycling_level = %s
""", (load, recycling_level))
row = cur.fetchone()
if row:
return {
'load': load,
'recycling_level': row[2],
'CO2': row[3],
'N2': row[4],
'H2O': row[5],
'O2': row[6]
}
return None
def get_experiment_parameters(self, experiment_hash):
with self.connection.cursor() as cur:
cur.execute("SELECT * FROM experiment_parameters WHERE experiment_hash = %s", (experiment_hash,))
row = cur.fetchone()
if row:
load_params = self.get_load_parameters(row[5])
recycling_params = self.get_recycling_parameters(load_params['load'], row[6])
return {
'outer_blades_count': row[1],
'outer_blades_length': row[2],
'outer_blades_angle': row[3],
'middle_blades_count': row[4],
'load': load_params['load'],
'recycling': recycling_params['recycling_level'],
'experiment_hash': row[7]
}
return None
def get_experiments(self):
# query = """
# SELECT
# ep.experiment_hash AS file_id,
# ep.outer_blades_count,
# ep.outer_blades_length,
# ep.outer_blades_angle,
# ep.middle_blades_count,
# lp.primary_air_consumption,
# lp.secondary_air_consumption,
# lp.gas_inlet_consumption,
# rp.n2,
# rp.o2,
# rp.h2o,
# rp.co2
# FROM
# experiment_parameters ep
# JOIN
# load_parameters lp ON ep.load_id = lp.id
# JOIN
# recycling_parameters rp ON ep.recycling_id = rp.id
# """
query = """
SELECT
ep.experiment_hash AS file_id,
ep.outer_blades_count,
ep.outer_blades_length,
ep.outer_blades_angle,
ep.middle_blades_count,
lp.load,
rp.recycling_level
FROM
experiment_parameters ep
JOIN
load_parameters lp ON ep.load_id = lp.id
JOIN
recycling_parameters rp ON ep.recycling_id = rp.id
"""
with self.connection.cursor() as cursor:
cursor.execute(query)
data = cursor.fetchall()
columns = [desc[0] for desc in cursor.description]
df = pd.DataFrame(data, columns=columns)
return df
def save_csv_to_postgres(self, csv_path, file_id):
try:
# Read the file and add the hash as a new column
df = pd.read_csv(csv_path)
first_col = df.columns[0]
df = df[[first_col] + [col for col in df.columns if "Line Section: Direction [-1,0,0] (m)" not in col]]
# Rename the columns
rename_dict = {
"Line Section: Direction [-1,0,0] (m)": "Direction",
"Line Section: Temperature (K)": "Temperature",
"Line Section: Mass Fraction of Nitrogen Oxide Emission": "NOx",
"Line Section: Mass Fraction of CO2": "CO2",
"Line Section: Mass Fraction of CO": "CO"
}
df.rename(columns=rename_dict, inplace=True)
df['file_id'] = file_id
with self.connection.cursor() as cur:
cur.execute("SELECT file_id FROM experiment_data WHERE file_id = %s", (file_id,))
row = cur.fetchone()
if row:
cur.execute("DELETE FROM experiment_data WHERE file_id = %s", (file_id,))
self.connection.commit()
# Insert the new data from the DataFrame into the table
insert_query = '''
INSERT INTO experiment_data (Direction, Temperature, NOx, CO2, CO, file_id)
VALUES (%s, %s, %s, %s, %s, %s)
'''
data_to_insert = df.to_records(index=False).tolist()
cur.executemany(insert_query, data_to_insert)
self.connection.commit()
# Close the cursor (the connection itself stays open)
cur.close()
return "Success"
except Exception as e:
return f"Failed: {str(e)}"
def get_data(self):
query = """
WITH max_temp AS (
SELECT
file_id,
temperature AS max_temperature,
direction AS direction_for_max_temp,
ROW_NUMBER() OVER (PARTITION BY file_id ORDER BY temperature DESC) AS temp_rank
FROM
experiment_data
),
max_co2 AS (
SELECT
file_id,
co2 AS max_co2,
direction AS direction_for_max_co2,
ROW_NUMBER() OVER (PARTITION BY file_id ORDER BY co2 DESC) AS co2_rank
FROM
experiment_data
),
max_co AS (
SELECT
file_id,
co AS max_co,
direction AS direction_for_max_co,
ROW_NUMBER() OVER (PARTITION BY file_id ORDER BY co DESC) AS co_rank
FROM
experiment_data
),
max_nox AS (
SELECT
file_id,
nox AS max_nox,
direction AS direction_for_max_nox,
ROW_NUMBER() OVER (PARTITION BY file_id ORDER BY nox DESC) AS nox_rank
FROM
experiment_data
)
SELECT
t.file_id,
t.direction_for_max_temp,
t.max_temperature,
cx.direction_for_max_co2,
cx.max_co2,
c.direction_for_max_co,
c.max_co,
n.direction_for_max_nox,
n.max_nox
FROM
(SELECT * FROM max_temp WHERE temp_rank = 1) t
LEFT JOIN
(SELECT * FROM max_nox WHERE nox_rank = 1) n ON t.file_id = n.file_id
LEFT JOIN
(SELECT * FROM max_co2 WHERE co2_rank = 1) cx ON t.file_id = cx.file_id
LEFT JOIN
(SELECT * FROM max_co WHERE co_rank = 1) c ON t.file_id = c.file_id;
"""
with self.connection.cursor() as cursor:
cursor.execute(query)
data = cursor.fetchall()
columns = [desc[0] for desc in cursor.description]
df = pd.DataFrame(data, columns=columns)
return df
def close(self):
self.connection.close()
# Main script
# def main():
# # Data
# experiment_parameters = {
# 'outer_blades_count': 24,
# 'outer_blades_length': 74.0,
# 'outer_blades_angle': 65.0,
# 'middle_blades_count': 18,
# 'load': 315.0,
# 'recycling': 8.0,
# }
#
# load_parameters = {
# 'load': 315.0,
# 'primary_air_consumption': 15.2239,
# 'secondary_air_consumption': 63.9876,
# 'gas_inlet_consumption': 0.8648
# }
#
# recycling_parameters = {
# 'load': 315.0,
# 'recycling_level': 8.0,
# 'CO2': 0.04,
# 'N2': 0.70,
# 'H2O': 0.06,
# 'O2': 0.20
# }
#
# # Initialize the database
# db = PostgresClient(
# dbname="your_db_name",
# user="your_db_user",
# password="your_db_password",
# host="your_db_host",
# port="your_db_port"
# )
#
# try:
#
# # Retrieve and print the data
# retrieved_experiment = db.get_experiment_parameters(experiment_parameters['experiment_hash'])
# print("Retrieved experiment parameters:", retrieved_experiment)
#
# retrieved_load = db.get_load_parameters(load_parameters['load'])
# print("Retrieved load parameters:", retrieved_load)
#
# retrieved_recycling = db.get_recycling_parameters(recycling_parameters['load'],
# recycling_parameters['recycling_level'])
# print("Retrieved recycling parameters:", retrieved_recycling)
# finally:
# db.close()
#
#
# if __name__ == "__main__":
# main()

8
requirements.txt Normal file

@ -0,0 +1,8 @@
requests
psutil
PyYAML
psycopg[binary]
clickhouse-driver
pandas
pydantic-settings
clickhouse-connect

56
runner.py Normal file

@ -0,0 +1,56 @@
import subprocess
import time
def run_script():
# outer_blades_count = [12.0, 24.0, 36.0, 48.0]
# outer_blades_count = [24.0, 36.0]
outer_blades_count = [24.0]
# outer_blades_length = [44.0, 86.0, 107.5]
outer_blades_length = [70.0]
# outer_blades_angle = [30.0, 45.0, 60.0, 75.0]
outer_blades_angle = [45.0, 60.0]
middle_blades_count = [9.0, 27.0, 36.0]
# middle_blades_count = 18.0
# load = [190, 260, 315, 400, 465]
load = [315, 400, 465]
# load_eco = [260, 315, 400, 465]
# load = [190, 260, 465]
# recycling_eco = [0, 7, 14, 21, 28]
# recycling_full = [0, 5, 8, 10, 15, 20, 25, 30]
recycling_full = [0, 5, 10, 20]
# recycling = [0, 6, 12, 18, 24, 30]
# recycling = [0, 5, 8]
for middle_count in middle_blades_count:
for length in outer_blades_length:
for outer_blade in outer_blades_count:
for angle in outer_blades_angle:
for current_load in load:
for current_recycling in recycling_full:
# Start timing
start_time = time.time()
result = subprocess.run(['python', 'new_experiment_planner.py', str(outer_blade),
str(length), str(angle), str(middle_count),
str(current_load), str(current_recycling)], capture_output=True, text=True)
# Stop timing
end_time = time.time()
# Compute the execution time
execution_time = end_time - start_time
print("Output of the script:")
print(result.stdout)
print("Errors (if any):")
print(result.stderr)
print("Execution time:", execution_time)
if __name__ == "__main__":
run_script()

58
runner_db.py Normal file

@ -0,0 +1,58 @@
from insert_to_db import add_data_to_db
import macros_generator as mg
import yaml
# Load the configuration from config.yaml
with open('config.yaml', 'r') as config_file:
config = yaml.safe_load(config_file)
DIAMETERS = config['parameters']['diameters']
# outer_blades_count = [12, 24, 36, 48]
outer_blades_count = 24.0
# outer_blades_length = [44, 70, 86, 107.5]
outer_blades_length = 70.0
outer_blades_angle = [30.0, 45.0, 60.0, 75.0]
# middle_blades_count = [9, 18, 27, 36]
middle_blades_count = 18.0
# load = [190, 260, 315, 400, 465]
# load_eco = [260, 315, 400, 465]
load = [190, 260, 315, 400, 465]
# recycling_eco = [0, 7, 14, 21, 28]
recycling_full = [0, 5, 8, 10, 15, 20, 25, 30]
# recycling = [0, 6, 12, 18, 24, 30]
# recycling = [0, 5, 8]
for angle in outer_blades_angle:
for current_load in load:
for current_recycling in recycling_full:
experiment_parameters = {
'outer_blades_count': outer_blades_count,
'outer_blades_length': outer_blades_length,
'outer_blades_angle': angle,
'middle_blades_count': middle_blades_count,
'load': current_load,
'recycling': current_recycling,
}
_cur_diameters = DIAMETERS.copy()
fuel_parameters = mg.load_calculation(float(current_load), _cur_diameters)
load_parameters = {
'load': current_load,
'primary_air_consumption': fuel_parameters['primary_air_consumption'],
'secondary_air_consumption': fuel_parameters['secondary_air_consumption'],
'gas_inlet_consumption': fuel_parameters['gas_inlet_consumption']
}
recycling_parameters = mg.recycling_calculation(
fuel_parameters['alpha'], fuel_parameters['gas_consumption'], fuel_parameters['air_consumption'],
float(current_recycling))
recycling_parameters['load'] = float(current_load)
recycling_parameters['recycling_level'] = float(current_recycling)
add_data_to_db(experiment_parameters, load_parameters, recycling_parameters)

13
settings.py Normal file

@ -0,0 +1,13 @@
from pydantic_settings import BaseSettings, SettingsConfigDict
class Settings(BaseSettings):
DATABASE: str
POSTGRES_USER: str
POSTGRES_PASSWORD: str
CLICKHOUSE_USER: str
CLICKHOUSE_PASSWORD: str
model_config = SettingsConfigDict(env_file=".env")
settings = Settings()
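A quick way to check that the values from `.env` are picked up (illustrative; assumes the `.env` file from the README is in the working directory):
```
from settings import settings

print(settings.DATABASE)         # SuperService
print(settings.CLICKHOUSE_USER)  # UserMyHouse
```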

33
test_db.py Normal file

@ -0,0 +1,33 @@
from clickhouse_tools import ClickHouseClient
from settings import settings
from pathlib import Path
import yaml
import utils
experiment_parameters = {
'outer_blades_count': 24.0,
'outer_blades_length': 70.0,
'outer_blades_angle': 60.0,
'middle_blades_count': 18.0,
'load': 400,
'recycling': 15,
}
with open('config.yaml', 'r') as config_file:
config = yaml.safe_load(config_file)
MAIN_PATH = config['paths']['main']
main_path = Path(MAIN_PATH)
geometry_path = (f"{experiment_parameters['outer_blades_count']}_{experiment_parameters['outer_blades_length']}_"
f"{experiment_parameters['outer_blades_angle']}_{experiment_parameters['middle_blades_count']}")
experiments_path = main_path / geometry_path / 'experiments'
load_path = experiments_path / str(experiment_parameters['load'])
recycling_path = load_path / str(experiment_parameters['recycling'])
table_csv = recycling_path / 'data_table.csv'
file_id = utils.calculate_hash(experiment_parameters)
clickhouse_client = ClickHouseClient("localhost", 8123, 'SuperService', 'UserMyHouse',
'NotWarningWord2')
clickhouse_client.save_csv_to_clickhouse(table_csv, file_id)

7
utils.py Normal file

@ -0,0 +1,7 @@
import hashlib
import json
def calculate_hash(experiment_params):
params_str = json.dumps(experiment_params, sort_keys=True)
return hashlib.sha256(params_str.encode()).hexdigest()
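This hash is what the other modules use as the `file_id`/`experiment_hash` that links the PostgreSQL and ClickHouse records; a small illustration with the parameter set from `test_db.py`:
```
from utils import calculate_hash

params = {
    'outer_blades_count': 24.0,
    'outer_blades_length': 70.0,
    'outer_blades_angle': 60.0,
    'middle_blades_count': 18.0,
    'load': 400,
    'recycling': 15,
}
file_id = calculate_hash(params)
print(len(file_id))  # 64 — SHA-256 hex digest; identical parameter dicts give identical ids
```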