piping the CSVs into the DB, more than half made it in lol

This commit is contained in:
HellsSenju 2024-10-15 00:10:09 +04:00
parent 3fd4c5ccca
commit 66df83c2a5
13 changed files with 316 additions and 18 deletions

7
.env
View File

@ -1,5 +1,10 @@
DB_USER=postgres
DB_PASSWORD=password
DB_HOST=localhost
DB_PORT=5432
DB_NAME=test
DATABASE=SuperService
POSTGRES_USER=UserSuperService
POSTGRES_PASSWORD=NotWarningWord1
CLICKHOUSE_USER=UserMyHouse
CLICKHOUSE_PASSWORD=NotWarningWord2

View File

@ -55,8 +55,16 @@ docker exec -it clickhouse-db clickhouse-client -u UserMyHouse --password NotWar
```
USE SuperService;
```
## 7. Connecting to PostgreSQL
## 8. Connecting to PostgreSQL
Command to connect to PostgreSQL
```
docker exec -it postgres-db psql -U UserSuperService -d SuperService
```
## 9. Alembic migrations
```
alembic revision --autogenerate
```
```
alembic upgrade head
```
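The first command autogenerates a revision from the diff between the models' metadata and the database; the second applies all pending revisions. Naming revisions makes the history easier to read, e.g. (the `-m` message here is just an example):
```
alembic revision --autogenerate -m "create experiment tables"
```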

117
alembic.ini Normal file
View File

@ -0,0 +1,117 @@
# A generic, single database configuration.
[alembic]
# path to migration scripts
# Use forward slashes (/) also on windows to provide an os agnostic path
script_location = db/migrations
# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
# Uncomment the line below if you want the files to be prepended with date and time
# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
# for all available tokens
# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
# sys.path path, will be prepended to sys.path if present.
# defaults to the current working directory.
prepend_sys_path = . db
# timezone to use when rendering the date within the migration file
# as well as the filename.
# If specified, requires the python>=3.9 or backports.zoneinfo library.
# Any required deps can be installed by adding `alembic[tz]` to the pip requirements
# string value is passed to ZoneInfo()
# leave blank for localtime
# timezone =
# max length of characters to apply to the "slug" field
# truncate_slug_length = 40
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false
# version location specification; This defaults
# to db/migrations/versions. When using multiple version
# directories, initial revisions must be specified with --version-path.
# The path separator used here should be the separator specified by "version_path_separator" below.
# version_locations = %(here)s/bar:%(here)s/bat:db/migrations/versions
# version path separator; As mentioned above, this is the character used to split
# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
# Valid values for version_path_separator are:
#
# version_path_separator = :
# version_path_separator = ;
# version_path_separator = space
# version_path_separator = newline
version_path_separator = os # Use os.pathsep. Default configuration used for new projects.
# set to 'true' to search source files recursively
# in each "version_locations" directory
# new in Alembic version 1.10
# recursive_version_locations = false
# the output encoding used when revision files
# are written from script.py.mako
# output_encoding = utf-8
sqlalchemy.url = driver://user:pass@localhost/dbname
[post_write_hooks]
# post_write_hooks defines scripts or Python functions that are run
# on newly generated revision scripts. See the documentation for further
# detail and examples
# format using "black" - use the console_scripts runner, against the "black" entrypoint
# hooks = black
# black.type = console_scripts
# black.entrypoint = black
# black.options = -l 79 REVISION_SCRIPT_FILENAME
# lint with attempts to fix using "ruff" - use the exec runner, execute a binary
# hooks = ruff
# ruff.type = exec
# ruff.executable = %(here)s/.venv/bin/ruff
# ruff.options = --fix REVISION_SCRIPT_FILENAME
# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S
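Note: the placeholder `sqlalchemy.url = driver://user:pass@localhost/dbname` above is overridden at runtime in `db/migrations/env.py` via `config.set_main_option(...)`, so it never needs real credentials.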

db/config.py
View File

@ -7,12 +7,22 @@ class Settings(BaseSettings):
    DB_HOST: str
    DB_PORT: int
    DB_NAME: str
    DATABASE: str
    POSTGRES_USER: str
    POSTGRES_PASSWORD: str
    CLICKHOUSE_USER: str
    CLICKHOUSE_PASSWORD: str

    @property
    def db_url_asyncpg(self):
        # e.g. 'postgresql+asyncpg://username:password@localhost:5432/database_name'
        return f'postgresql+asyncpg://{self.DB_USER}:{self.DB_PASSWORD}@{self.DB_HOST}:{self.DB_PORT}/{self.DB_NAME}'

    @property
    def db_url_asyncpg_docker(self):
        # same URL shape, but pointed at the compose service host 'db'
        return f'postgresql+asyncpg://{self.POSTGRES_USER}:{self.POSTGRES_PASSWORD}@db:{self.DB_PORT}/{self.DATABASE}'

    model_config = SettingsConfigDict(env_file='../.env')
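For reference, a rough sketch of what the two properties resolve to with the `.env` values from this commit (assuming a module-level `settings = Settings()` instance, which `from db.config import settings` elsewhere in this diff implies):
```
from db.config import settings

# Local URL from the DB_* variables:
#   postgresql+asyncpg://postgres:password@localhost:5432/test
print(settings.db_url_asyncpg)

# Docker URL from the POSTGRES_* variables; host is the compose service 'db':
#   postgresql+asyncpg://UserSuperService:NotWarningWord1@db:5432/SuperService
print(settings.db_url_asyncpg_docker)
```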

View File

@ -1,10 +1,56 @@
import pandas as pd
from sqlalchemy.ext.asyncio import create_async_engine, async_sessionmaker
from sqlalchemy import insert
from db.config import settings
from db.models.base import Base
from db.models.experiment_data_model import ExperimentData
from db.models.experiment_parameters_model import ExperimentParameters
from db.models.load_parameters_model import LoadParameters
from db.models.recycling_parameters_model import RecyclingParameters
import asyncio
engine = create_async_engine(url=settings.db_url_asyncpg, echo=True)
async_session = async_sessionmaker(engine)
df = pd.read_csv('./files/recycling_parameters.csv')
headers = df.columns.tolist()
print(headers)
for header in headers:
    column_type = df[header].dtype
    print(column_type)


async def create_all_tables():
    async with engine.begin() as conn:
        await conn.run_sync(Base.metadata.create_all)


async def drop_all_tables():
    async with engine.begin() as conn:
        await conn.run_sync(Base.metadata.drop_all)


async def load_data_to_db(file: str, model_class):
    async with async_session() as session:
        df = pd.read_csv(file).dropna()
        # Convert the DataFrame rows into a format suitable for SQLAlchemy
        data_records = df.to_dict(orient='records')
        # Bulk-insert all records in a single statement
        stmt = insert(model_class).values(data_records)
        await session.execute(stmt)
        await session.commit()


async def main():
    # await drop_all_tables()
    # await create_all_tables()
    await load_data_to_db('./files/experiment_data.csv', ExperimentData)
    # await load_data_to_db('./files/load_parameters.csv', LoadParameters)
    # await load_data_to_db('./files/recycling_parameters.csv', RecyclingParameters)
    # await load_data_to_db('./files/experiment_parameters.csv', ExperimentParameters)


if __name__ == '__main__':
    asyncio.run(main())

# Leftover debug helper for inspecting CSV headers and dtypes:
# df = pd.read_csv('./files/experiment_data.csv')
# headers = df.columns.tolist()
# print(headers)
#
# for header in headers:
#     column_type = df[header].dtype
#     print(column_type)
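`insert(model_class).values(data_records)` sends every row in one statement, which is fine at this data size. If the CSVs grow, the same loader could insert in fixed-size batches; a hypothetical variant (`load_data_chunked` is not part of this commit):
```
async def load_data_chunked(file: str, model_class, chunk_size: int = 1000):
    async with async_session() as session:
        records = pd.read_csv(file).dropna().to_dict(orient='records')
        # Issue one INSERT per chunk instead of one giant statement
        for i in range(0, len(records), chunk_size):
            await session.execute(insert(model_class).values(records[i:i + chunk_size]))
        await session.commit()
```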

74
db/migrations/env.py Normal file
View File

@ -0,0 +1,74 @@
from logging.config import fileConfig
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from alembic import context
from db.config import settings
from db.models.experiment_data_model import ExperimentData
from db.models.experiment_parameters_model import ExperimentParameters
from db.models.load_parameters_model import LoadParameters
from db.models.recycling_parameters_model import RecyclingParameters
from db.models.base import Base

config = context.config

if config.config_file_name is not None:
    fileConfig(config.config_file_name)

config.set_main_option('sqlalchemy.url', settings.db_url_asyncpg + '?async_fallback=True')

target_metadata = Base.metadata


def run_migrations_offline() -> None:
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well. By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.
    """
    url = config.get_main_option("sqlalchemy.url")
    context.configure(
        url=url,
        target_metadata=target_metadata,
        literal_binds=True,
        dialect_opts={"paramstyle": "named"},
    )

    with context.begin_transaction():
        context.run_migrations()


def run_migrations_online() -> None:
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    connectable = engine_from_config(
        config.get_section(config.config_ini_section, {}),
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,
    )

    with connectable.connect() as connection:
        context.configure(
            connection=connection, target_metadata=target_metadata
        )

        with context.begin_transaction():
            context.run_migrations()


if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
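A note on the `?async_fallback=True` query flag: it lets the async `postgresql+asyncpg` URL work with the synchronous `engine_from_config()` call above. The fully asynchronous env.py pattern from the Alembic docs would look roughly like this (a sketch, not what this commit uses; `config`, `context`, and `target_metadata` are the same objects defined in env.py above):
```
import asyncio

from sqlalchemy import pool
from sqlalchemy.ext.asyncio import async_engine_from_config


def do_run_migrations(connection) -> None:
    context.configure(connection=connection, target_metadata=target_metadata)
    with context.begin_transaction():
        context.run_migrations()


async def run_async_migrations() -> None:
    # Build an AsyncEngine from the [alembic] config section
    connectable = async_engine_from_config(
        config.get_section(config.config_ini_section, {}),
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,
    )
    async with connectable.connect() as connection:
        await connection.run_sync(do_run_migrations)
    await connectable.dispose()


def run_migrations_online() -> None:
    asyncio.run(run_async_migrations())
```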

db/migrations/script.py.mako Normal file
View File

@ -0,0 +1,26 @@
"""${message}
Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
# revision identifiers, used by Alembic.
revision: str = ${repr(up_revision)}
down_revision: Union[str, None] = ${repr(down_revision)}
branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
def upgrade() -> None:
${upgrades if upgrades else "pass"}
def downgrade() -> None:
${downgrades if downgrades else "pass"}

View File

@ -1,4 +1,4 @@
from data_base.db import Base
from db.models.base import Base
from sqlalchemy.orm import Mapped, mapped_column
class ChExperimentDBExperimentData(Base):

db/models/experiment_data_model.py
View File

@ -1,16 +1,18 @@
from base import Base
from typing import Optional
from db.models.base import Base
from sqlalchemy.orm import Mapped, mapped_column
class ExperimentData(Base):
    __tablename__ = 'experiment_data'

    id: Mapped[int] = mapped_column(primary_key=True, autoincrement=True)
    id: Mapped[int] = mapped_column(primary_key=True)
    direction: Mapped[float]
    temperature: Mapped[float]
    nox: Mapped[float]
    co2: Mapped[float]
    co: Mapped[float]
    file_id: Mapped[str]
    file_id: Mapped[Optional[str]]

    def __repr__(self):
        return f"<ExperimentData>"

db/models/experiment_parameters_model.py
View File

@ -2,17 +2,18 @@ from typing import Optional
from sqlalchemy import ForeignKey
from base import Base
from db.models.base import Base
from sqlalchemy.orm import Mapped, mapped_column
class ExperimentParameters(Base):
    __tablename__ = 'experiment_parameters'

    id: Mapped[int] = mapped_column(primary_key=True, autoincrement=True)
    id: Mapped[int] = mapped_column(primary_key=True)
    outer_blades_count: Mapped[int]
    outer_blades_length: Mapped[float]
    outer_blades_angle: Mapped[float]
    middle_blades_count: Mapped[int]
    # load_id: Mapped[int]
    load_id: Mapped[Optional[int]] = mapped_column(ForeignKey('load_parameters.id', ondelete='SET NULL'))
    recycling_id: Mapped[Optional[int]] = mapped_column(ForeignKey('recycling_parameters.id', ondelete='SET NULL'))
    experiment_hash: Mapped[str]

db/models/load_parameters_model.py
View File

@ -1,10 +1,10 @@
from base import Base
from db.models.base import Base
from sqlalchemy.orm import Mapped, mapped_column
class LoadParameters(Base):
    __tablename__ = 'load_parameters'

    id: Mapped[int] = mapped_column(primary_key=True, autoincrement=True)
    id: Mapped[int] = mapped_column(primary_key=True)
    load: Mapped[int]
    primary_air_consumption: Mapped[float]
    secondary_air_consumption: Mapped[float]

db/models/recycling_parameters_model.py
View File

@ -2,13 +2,13 @@ from typing import Optional
from sqlalchemy import ForeignKey
from base import Base
from db.models.base import Base
from sqlalchemy.orm import Mapped, mapped_column
class RecyclingParameters(Base):
    __tablename__ = 'recycling_parameters'

    id: Mapped[int] = mapped_column(primary_key=True, autoincrement=True)
    id: Mapped[int] = mapped_column(primary_key=True)
    load_id: Mapped[Optional[int]] = mapped_column(ForeignKey('load_parameters.id', ondelete='SET NULL'))
    recycling_level: Mapped[int]
    co2: Mapped[float]
View File

@ -29,3 +29,12 @@ def run_experiment_api(params: ExperimentParameters):
        return {"status": "success", "message": "Experiment started successfully."}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"An error occurred: {str(e)}")


@app.post('/init_db_data')
def init_db_data():
    try:
        # Stub: returns success without actually loading anything yet
        return {"status": "success", "message": "DB data initialized successfully."}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"An error occurred: {str(e)}")
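The endpoint body is still a stub; a natural next step would be to call the CSV loader from it. A hypothetical sketch, assuming `load_data_to_db` and the model classes can be imported into this module (they cannot yet, as of this commit):
```
@app.post('/init_db_data')
async def init_db_data():
    try:
        # Reuse the bulk loader from the CSV import script
        await load_data_to_db('./files/experiment_data.csv', ExperimentData)
        await load_data_to_db('./files/load_parameters.csv', LoadParameters)
        return {"status": "success", "message": "DB data initialized successfully."}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"An error occurred: {str(e)}")
```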