Compare commits

..

37 Commits

Author SHA1 Message Date
528309ab84 Merge pull request 'kashin_maxim_lab_5' (#124) from kashin_maxim_lab_5 into main
Reviewed-on: #124
2024-11-20 22:45:51 +04:00
0814d8533d Merge pull request 'kashin_maxim_lab_4' (#123) from kashin_maxim_lab_4 into main
Reviewed-on: #123
2024-11-20 22:45:28 +04:00
354ee2679e Merge pull request 'yakovleva_yulia_lab_8 is ready' (#122) from yakovleva_yulia_lab_8 into main
Reviewed-on: #122
2024-11-20 22:45:02 +04:00
d302bd2213 Merge pull request 'yakovleva_yulia_lab_7 is ready' (#121) from yakovleva_yulia_lab_7 into main
Reviewed-on: #121
2024-11-20 22:44:39 +04:00
2aed7bf385 Merge pull request 'yakovleva_yulia_lab_6 is ready' (#120) from yakovleva_yulia_lab_6 into main
Reviewed-on: #120
2024-11-20 22:44:06 +04:00
d4e24db25e Merge pull request 'kadyrov_aydar_lab_5' (#119) from kadyrov_aydar_lab_5 into main
Reviewed-on: #119
2024-11-20 22:43:23 +04:00
c0ca1d4bb5 Merge pull request 'kadyrov_aydar_lab_4' (#117) from kadyrov_aydar_lab_4 into main
Reviewed-on: #117
2024-11-20 22:43:05 +04:00
6eeb90ea45 Merge pull request 'tukaeva_alfiya_lab_8' (#116) from tukaeva_alfiya_lab_8 into main
Reviewed-on: #116
2024-11-20 22:38:42 +04:00
bc2d7cb2f6 Merge pull request 'tukaeva_alfiya_lab_7' (#115) from tukaeva_alfiya_lab_7 into main
Reviewed-on: #115
2024-11-20 22:37:46 +04:00
e1da6f26ab Merge pull request 'tukaeva_alfiya_lab_6' (#114) from tukaeva_alfiya_lab_6 into main
Reviewed-on: #114
2024-11-20 22:37:01 +04:00
e5df53b5c2 Merge pull request 'turner_ilya_lab_2' (#113) from turner_ilya_lab_2 into main
Reviewed-on: #113
2024-11-20 22:36:40 +04:00
c98770752e Merge pull request 'mochalov_danila_lab_3' (#112) from mochalov_danila_lab_3 into main
Reviewed-on: #112
2024-11-20 22:36:16 +04:00
a800c3df86 Merge pull request 'Bazunov Andrew Lab 4' (#111) from bazunov_andrew_lab_4 into main
Reviewed-on: #111
2024-11-20 22:35:35 +04:00
a51e33a201 Merge pull request 'turner_ilya_lab_1' (#110) from turner_ilya_lab_1 into main
Reviewed-on: #110
2024-11-20 22:34:54 +04:00
a9af84010a Merge pull request 'Bazunov Andrew lab3' (#109) from bazunov_andrew_lab_3 into main
Reviewed-on: #109
2024-11-20 22:34:26 +04:00
3645d0c1cd Merge pull request 'yakovleva_yulia_lab_5 is ready' (#107) from yakovleva_yulia_lab_5 into main
Reviewed-on: #107
Reviewed-by: Alexey <a.zhelepov@mail.ru>
2024-11-20 22:33:27 +04:00
08f2f63ad4 Done 2024-10-27 19:42:27 +04:00
e4e3748a3d Completed 2024-10-27 19:09:16 +04:00
JulYakJul
5e522fbcc0 yakovleva_yulia_lab_8 is ready 2024-10-27 15:10:30 +04:00
JulYakJul
cae7189c1e fix 2024-10-27 14:06:02 +04:00
JulYakJul
2bfc8a0a43 yakovleva_yulia_lab_7 is ready 2024-10-27 14:02:15 +04:00
JulYakJul
1f89960672 fix 2024-10-27 13:06:24 +04:00
JulYakJul
ffb4c2a8a4 yakovleva_yulia_lab_6 is ready 2024-10-27 13:04:11 +04:00
NAP
1dc621e0be kadyrov_aydar_lab_5 2024-10-27 02:16:28 +04:00
NAP
11c62d9bf7 kadyrov_aydar_lab_5 2024-10-27 02:13:51 +04:00
NAP
03910a9a3f kadyrov_aydar_lab_4 2024-10-27 01:53:34 +04:00
f7d483196c tukaeva_alfiya_lab_8 is ready 2024-10-26 23:16:19 +04:00
545377f948 tukaeva_alfiya_lab_7 fix 2024-10-26 22:58:30 +04:00
bb867da520 tukaeva_alfiya_lab_7 is ready 2024-10-26 22:41:45 +04:00
c4a260ebda tukaeva_alfiya_lab_6 is ready 2024-10-26 22:26:14 +04:00
88392a8041 turner_ilya_lab_2 is ready 2024-10-26 21:09:43 +04:00
JulYakJul
400de30b49 fix 2024-10-26 20:04:39 +04:00
96a4e6ac43 mochalov_danila_lab_3 is ready 2024-10-26 18:18:28 +04:00
Bazunov Andrew Igorevich
03c52d0c76 Complete lab4 2024-10-26 17:51:52 +04:00
6dd4835f54 turner_ilya_lab_1 is ready 2024-10-26 17:34:47 +04:00
Bazunov Andrew Igorevich
5187005e6a complete lab 3 2024-10-26 14:46:33 +04:00
JulYakJul
a5f0403627 yakovleva_yulia_lab_5 is ready 2024-10-25 18:12:36 +04:00
177 changed files with 5606 additions and 786 deletions

57
.gitignore vendored
View File

@ -1,57 +0,0 @@
################################################################################
# This GITIGNORE file was automatically created by Microsoft(R) Visual Studio.
################################################################################
/.vs/DAS_2024_1
/.vs
/aleikin_artem_lab_3/.vs
/aleikin_artem_lab_3/ProjectEntityProject/bin/Debug/net8.0
/aleikin_artem_lab_3/ProjectEntityProject/obj
/aleikin_artem_lab_3/TaskProject/bin/Debug/net8.0
/aleikin_artem_lab_3/TaskProject/obj/Container
/aleikin_artem_lab_3/TaskProject/obj
/aleikin_artem_lab_4/RVIPLab4/.vs
/aleikin_artem_lab_4/RVIPLab4/Consumer1/bin/Debug/net8.0
/aleikin_artem_lab_4/RVIPLab4/Consumer1/obj
/aleikin_artem_lab_4/RVIPLab4/Consumer2/bin/Debug/net8.0
/aleikin_artem_lab_4/RVIPLab4/Consumer2/obj
/aleikin_artem_lab_4/RVIPLab4/FirstTutorial/Receive/bin/Debug/net8.0
/aleikin_artem_lab_4/RVIPLab4/FirstTutorial/Receive/obj
/aleikin_artem_lab_4/RVIPLab4/FirstTutorial/Send/bin/Debug/net8.0
/aleikin_artem_lab_4/RVIPLab4/FirstTutorial/Send/obj
/aleikin_artem_lab_4/RVIPLab4/Publisher/bin/Debug/net8.0
/aleikin_artem_lab_4/RVIPLab4/Publisher/obj
/aleikin_artem_lab_4/RVIPLab4/SecondTutorial/NewTask/bin/Debug/net8.0
/aleikin_artem_lab_4/RVIPLab4/SecondTutorial/NewTask/obj
/aleikin_artem_lab_4/RVIPLab4/SecondTutorial/Worker/bin/Debug/net8.0
/aleikin_artem_lab_4/RVIPLab4/SecondTutorial/Worker/obj
/aleikin_artem_lab_4/RVIPLab4/ThirdTutorial/EmitLog/bin/Debug/net8.0
/aleikin_artem_lab_4/RVIPLab4/ThirdTutorial/EmitLog/obj
/aleikin_artem_lab_4/RVIPLab4/ThirdTutorial/ReceiveLogs/bin/Debug/net8.0
/aleikin_artem_lab_4/RVIPLab4/ThirdTutorial/ReceiveLogs/obj
/dozorova_alena_lab_2
/dozorova_alena_lab_3
/dozorova_alena_lab_4
/dozorova_alena_lab_5/ConsoleApp1/obj
/dozorova_alena_lab_6/ConsoleApp1/obj
/aleikin_artem_lab_4/RVIPLab4/RVIPLab4.sln
/aleikin_artem_lab_4/.vs
/aleikin_artem_lab_4/Consumer1/bin/Debug/net8.0
/aleikin_artem_lab_4/Consumer1/obj
/aleikin_artem_lab_4/Consumer2/bin/Debug/net8.0
/aleikin_artem_lab_4/Consumer2/obj
/aleikin_artem_lab_4/FirstTutorial/Receive/bin/Debug/net8.0
/aleikin_artem_lab_4/FirstTutorial/Receive/obj
/aleikin_artem_lab_4/FirstTutorial/Send/bin/Debug/net8.0
/aleikin_artem_lab_4/FirstTutorial/Send/obj
/aleikin_artem_lab_4/Publisher/bin/Debug/net8.0
/aleikin_artem_lab_4/Publisher/obj
/aleikin_artem_lab_4/SecondTutorial/NewTask/bin/Debug/net8.0
/aleikin_artem_lab_4/SecondTutorial/NewTask/obj
/aleikin_artem_lab_4/SecondTutorial/Worker/bin/Debug/net8.0
/aleikin_artem_lab_4/SecondTutorial/Worker/obj
/aleikin_artem_lab_4/ThirdTutorial/EmitLog/bin/Debug/net8.0
/aleikin_artem_lab_4/ThirdTutorial/EmitLog/obj
/aleikin_artem_lab_4/ThirdTutorial/ReceiveLogs/bin/Debug/net8.0
/aleikin_artem_lab_4/ThirdTutorial/ReceiveLogs/obj
/aleikin_artem_lab_4/RVIPLab4.sln

View File

@ -1,30 +0,0 @@
**/.classpath
**/.dockerignore
**/.env
**/.git
**/.gitignore
**/.project
**/.settings
**/.toolstarget
**/.vs
**/.vscode
**/*.*proj.user
**/*.dbmdl
**/*.jfm
**/azds.yaml
**/bin
**/charts
**/docker-compose*
**/Dockerfile*
**/node_modules
**/npm-debug.log
**/obj
**/secrets.dev.yaml
**/values.dev.yaml
LICENSE
README.md
!**/.gitignore
!.git/HEAD
!.git/config
!.git/packed-refs
!.git/refs/heads/**

View File

@ -1,17 +0,0 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>net8.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<DockerDefaultTargetOS>Linux</DockerDefaultTargetOS>
<DockerfileContext>.</DockerfileContext>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Microsoft.VisualStudio.Azure.Containers.Tools.Targets" Version="1.21.0" />
<PackageReference Include="RabbitMQ.Client" Version="7.0.0" />
</ItemGroup>
</Project>

View File

@ -1,6 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="Current" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<ActiveDebugProfile>Container (Dockerfile)</ActiveDebugProfile>
</PropertyGroup>
</Project>

View File

@ -1,28 +0,0 @@
# See https://aka.ms/customizecontainer to learn how to customize the debug container and how Visual Studio uses this Dockerfile to build images for faster debugging.
# This stage is used when running from VS in fast mode (default for the Debug configuration)
FROM mcr.microsoft.com/dotnet/runtime:8.0 AS base
USER app
WORKDIR /app
# This stage is used to build the service project
FROM mcr.microsoft.com/dotnet/sdk:8.0 AS build
ARG BUILD_CONFIGURATION=Release
WORKDIR /src
COPY ["Consumer1.csproj", "."]
RUN dotnet restore "./Consumer1.csproj"
COPY . .
WORKDIR "/src/."
RUN dotnet build "./Consumer1.csproj" -c $BUILD_CONFIGURATION -o /app/build
# This stage is used to publish the service project, which is copied into the final stage
FROM build AS publish
ARG BUILD_CONFIGURATION=Release
RUN dotnet publish "./Consumer1.csproj" -c $BUILD_CONFIGURATION -o /app/publish /p:UseAppHost=false
# This stage is used in production or when running from VS in regular mode (default when the Debug configuration is not used)
FROM base AS final
WORKDIR /app
COPY --from=publish /app/publish .
ENTRYPOINT ["dotnet", "Consumer1.dll"]

View File

@ -1,39 +0,0 @@
using RabbitMQ.Client;
using RabbitMQ.Client.Events;
using System.Text;
var factory = new ConnectionFactory
{
HostName = "rabbitmq",
UserName = "admin",
Password = "admin"
};
using var connection = await factory.CreateConnectionAsync();
using var channel = await connection.CreateChannelAsync();
var queueName = "slow_queue";
var exchangeName = "logs_exchange";
await channel.QueueDeclareAsync(queue: queueName, durable: true, exclusive: false, autoDelete: false, arguments: null);
await channel.QueueBindAsync(queue: queueName, exchange: exchangeName, routingKey: "");
Console.WriteLine("[Consumer1] Waiting for messages...");
// Register the consumer once; re-registering it inside an endless loop would pile up duplicate handlers.
var consumer = new AsyncEventingBasicConsumer(channel);
consumer.ReceivedAsync += async (model, ea) =>
{
var body = ea.Body.ToArray();
var message = Encoding.UTF8.GetString(body);
Console.WriteLine($"[Consumer1] Received: {message}");
// Simulate slow processing (2-3 seconds) before acknowledging.
await Task.Delay(Random.Shared.Next(2000, 3000));
Console.WriteLine("[Consumer1] Done processing");
await channel.BasicAckAsync(deliveryTag: ea.DeliveryTag, multiple: false);
};
await channel.BasicConsumeAsync(queue: queueName, autoAck: false, consumer: consumer);
// Keep the process alive while messages are handled asynchronously.
await Task.Delay(Timeout.Infinite);

View File

@ -1,10 +0,0 @@
{
"profiles": {
"Consumer1": {
"commandName": "Project"
},
"Container (Dockerfile)": {
"commandName": "Docker"
}
}
}

View File

@ -1,30 +0,0 @@
**/.classpath
**/.dockerignore
**/.env
**/.git
**/.gitignore
**/.project
**/.settings
**/.toolstarget
**/.vs
**/.vscode
**/*.*proj.user
**/*.dbmdl
**/*.jfm
**/azds.yaml
**/bin
**/charts
**/docker-compose*
**/Dockerfile*
**/node_modules
**/npm-debug.log
**/obj
**/secrets.dev.yaml
**/values.dev.yaml
LICENSE
README.md
!**/.gitignore
!.git/HEAD
!.git/config
!.git/packed-refs
!.git/refs/heads/**

View File

@ -1,17 +0,0 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>net8.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<DockerDefaultTargetOS>Linux</DockerDefaultTargetOS>
<DockerfileContext>.</DockerfileContext>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Microsoft.VisualStudio.Azure.Containers.Tools.Targets" Version="1.21.0" />
<PackageReference Include="RabbitMQ.Client" Version="7.0.0" />
</ItemGroup>
</Project>

View File

@ -1,6 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="Current" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<ActiveDebugProfile>Container (Dockerfile)</ActiveDebugProfile>
</PropertyGroup>
</Project>

View File

@ -1,28 +0,0 @@
# See https://aka.ms/customizecontainer to learn how to customize the debug container and how Visual Studio uses this Dockerfile to build images for faster debugging.
# This stage is used when running from VS in fast mode (default for the Debug configuration)
FROM mcr.microsoft.com/dotnet/runtime:8.0 AS base
USER app
WORKDIR /app
# This stage is used to build the service project
FROM mcr.microsoft.com/dotnet/sdk:8.0 AS build
ARG BUILD_CONFIGURATION=Release
WORKDIR /src
COPY ["Consumer2.csproj", "."]
RUN dotnet restore "./Consumer2.csproj"
COPY . .
WORKDIR "/src/."
RUN dotnet build "./Consumer2.csproj" -c $BUILD_CONFIGURATION -o /app/build
# This stage is used to publish the service project, which is copied into the final stage
FROM build AS publish
ARG BUILD_CONFIGURATION=Release
RUN dotnet publish "./Consumer2.csproj" -c $BUILD_CONFIGURATION -o /app/publish /p:UseAppHost=false
# This stage is used in production or when running from VS in regular mode (default when the Debug configuration is not used)
FROM base AS final
WORKDIR /app
COPY --from=publish /app/publish .
ENTRYPOINT ["dotnet", "Consumer2.dll"]

View File

@ -1,40 +0,0 @@
using RabbitMQ.Client;
using RabbitMQ.Client.Events;
using System.Data.Common;
using System.Text;
using System.Threading.Channels;
var factory = new ConnectionFactory
{
HostName = "rabbitmq",
UserName = "admin",
Password = "admin"
};
using var connection = await factory.CreateConnectionAsync();
using var channel = await connection.CreateChannelAsync();
var queueName = "fast_queue";
var exchangeName = "logs_exchange";
await channel.QueueDeclareAsync(queue: queueName, durable: true, exclusive: false, autoDelete: false, arguments: null);
await channel.QueueBindAsync(queue: queueName, exchange: exchangeName, routingKey: "");
Console.WriteLine("[Consumer2] Waiting for messages...");
// Register the consumer once; the previous endless loop re-registered it on every iteration.
var consumer = new AsyncEventingBasicConsumer(channel);
consumer.ReceivedAsync += async (model, ea) =>
{
var body = ea.Body.ToArray();
var message = Encoding.UTF8.GetString(body);
Console.WriteLine($"[Consumer2] Received: {message}");
Console.WriteLine("[Consumer2] Done processing");
await channel.BasicAckAsync(deliveryTag: ea.DeliveryTag, multiple: false);
};
await channel.BasicConsumeAsync(queue: queueName, autoAck: false, consumer: consumer);
// Keep the process alive while messages are handled asynchronously.
await Task.Delay(Timeout.Infinite);

View File

@ -1,10 +0,0 @@
{
"profiles": {
"Consumer2": {
"commandName": "Project"
},
"Container (Dockerfile)": {
"commandName": "Docker"
}
}
}

View File

@ -1,26 +0,0 @@
using RabbitMQ.Client;
using RabbitMQ.Client.Events;
using System.Text;
var factory = new ConnectionFactory { HostName = "localhost" };
using var connection = await factory.CreateConnectionAsync();
using var channel = await connection.CreateChannelAsync();
await channel.QueueDeclareAsync(queue: "hello", durable: false,
exclusive: false, autoDelete: false,arguments: null);
Console.WriteLine("[*] Waiting for messages...");
var consumer = new AsyncEventingBasicConsumer(channel);
consumer.ReceivedAsync += (model, ea) =>
{
var body = ea.Body.ToArray();
var message = Encoding.UTF8.GetString(body);
Console.WriteLine($" [*] Received {message}");
return Task.CompletedTask;
};
await channel.BasicConsumeAsync("hello", autoAck: true, consumer: consumer);
Console.WriteLine(" Press [enter] to exit.");
Console.ReadLine();

View File

@ -1,18 +0,0 @@
using RabbitMQ.Client;
using System.Text;
var factory = new ConnectionFactory { HostName = "localhost" };
using var connection = await factory.CreateConnectionAsync();
using var channel = await connection.CreateChannelAsync();
await channel.QueueDeclareAsync(queue: "hello", durable: false,
exclusive: false, autoDelete: false, arguments: null);
const string message = "Hello, World! ~from Artem";
var body = Encoding.UTF8.GetBytes(message);
await channel.BasicPublishAsync(exchange: string.Empty, routingKey: "hello", body: body);
Console.WriteLine($" [x] Sent {message}");
Console.WriteLine(" Press [enter] to exit.");
Console.ReadLine();

View File

@ -1,14 +0,0 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>net8.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="RabbitMQ.Client" Version="7.0.0" />
</ItemGroup>
</Project>

7 binary image files deleted (previews not shown; sizes 29-147 KiB)

View File

@ -1,30 +0,0 @@
**/.classpath
**/.dockerignore
**/.env
**/.git
**/.gitignore
**/.project
**/.settings
**/.toolstarget
**/.vs
**/.vscode
**/*.*proj.user
**/*.dbmdl
**/*.jfm
**/azds.yaml
**/bin
**/charts
**/docker-compose*
**/Dockerfile*
**/node_modules
**/npm-debug.log
**/obj
**/secrets.dev.yaml
**/values.dev.yaml
LICENSE
README.md
!**/.gitignore
!.git/HEAD
!.git/config
!.git/packed-refs
!.git/refs/heads/**

View File

@ -1,28 +0,0 @@
# See https://aka.ms/customizecontainer to learn how to customize the debug container and how Visual Studio uses this Dockerfile to build images for faster debugging.
# This stage is used when running from VS in fast mode (default for the Debug configuration)
FROM mcr.microsoft.com/dotnet/runtime:8.0 AS base
USER app
WORKDIR /app
# This stage is used to build the service project
FROM mcr.microsoft.com/dotnet/sdk:8.0 AS build
ARG BUILD_CONFIGURATION=Release
WORKDIR /src
COPY ["Publisher.csproj", "."]
RUN dotnet restore "./Publisher.csproj"
COPY . .
WORKDIR "/src/."
RUN dotnet build "./Publisher.csproj" -c $BUILD_CONFIGURATION -o /app/build
# This stage is used to publish the service project, which is copied into the final stage
FROM build AS publish
ARG BUILD_CONFIGURATION=Release
RUN dotnet publish "./Publisher.csproj" -c $BUILD_CONFIGURATION -o /app/publish /p:UseAppHost=false
# This stage is used in production or when running from VS in regular mode (default when the Debug configuration is not used)
FROM base AS final
WORKDIR /app
COPY --from=publish /app/publish .
ENTRYPOINT ["dotnet", "Publisher.dll"]

View File

@ -1,10 +0,0 @@
{
"profiles": {
"Publisher": {
"commandName": "Project"
},
"Container (Dockerfile)": {
"commandName": "Docker"
}
}
}

View File

@ -1,31 +0,0 @@
using RabbitMQ.Client;
using System.Text;
var factory = new ConnectionFactory
{
HostName = "rabbitmq",
UserName = "admin",
Password = "admin"
};
using var connection = await factory.CreateConnectionAsync();
Console.WriteLine("Connection established.");
using var channel = await connection.CreateChannelAsync();
Console.WriteLine("Channel created.");
await channel.ExchangeDeclareAsync(exchange: "logs_exchange", type: ExchangeType.Fanout);
while (true)
{
var message = $"Event: {GenerateRandomEvent()}";
var body = Encoding.UTF8.GetBytes(message);
await channel.BasicPublishAsync(exchange: "logs_exchange", routingKey: string.Empty, body: body);
Console.WriteLine($"[Publisher] Sent: {message}");
await Task.Delay(1000);
}
static string GenerateRandomEvent()
{
var events = new[] { "Order Received", "User Message", "Create Report" };
return events[new Random().Next(events.Length)] + " #" + new Random().Next(0, 99);
}

View File

@ -1,17 +0,0 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>net8.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<DockerDefaultTargetOS>Linux</DockerDefaultTargetOS>
<DockerfileContext>.</DockerfileContext>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Microsoft.VisualStudio.Azure.Containers.Tools.Targets" Version="1.21.0" />
<PackageReference Include="RabbitMQ.Client" Version="7.0.0" />
</ItemGroup>
</Project>

View File

@ -1,6 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="Current" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<ActiveDebugProfile>Container (Dockerfile)</ActiveDebugProfile>
</PropertyGroup>
</Project>

View File

@ -1,26 +0,0 @@
using RabbitMQ.Client;
using System.Text;
var factory = new ConnectionFactory { HostName = "localhost" };
using var connection = await factory.CreateConnectionAsync();
using var channel = await connection.CreateChannelAsync();
await channel.QueueDeclareAsync(queue: "task_queue", durable: true, exclusive: false,
autoDelete: false, arguments: null);
var message = GetMessage(args);
var body = Encoding.UTF8.GetBytes(message);
var properties = new BasicProperties
{
Persistent = true
};
await channel.BasicPublishAsync(exchange: string.Empty, routingKey: "task_queue", mandatory: true,
basicProperties: properties, body: body);
Console.WriteLine($" [x] Sent {message}");
static string GetMessage(string[] args)
{
return ((args.Length > 0) ? string.Join(" ", args) : "Hello World!");
}

View File

@ -1,35 +0,0 @@
using RabbitMQ.Client;
using RabbitMQ.Client.Events;
using System.Text;
var factory = new ConnectionFactory { HostName = "localhost" };
using var connection = await factory.CreateConnectionAsync();
using var channel = await connection.CreateChannelAsync();
await channel.QueueDeclareAsync(queue: "task_queue", durable: true, exclusive: false,
autoDelete: false, arguments: null);
await channel.BasicQosAsync(prefetchSize: 0, prefetchCount: 1, global: false);
Console.WriteLine(" [*] Waiting for messages.");
var consumer = new AsyncEventingBasicConsumer(channel);
consumer.ReceivedAsync += async (model, ea) =>
{
byte[] body = ea.Body.ToArray();
var message = Encoding.UTF8.GetString(body);
Console.WriteLine($" [x] Received {message}");
int dots = message.Split('.').Length - 1;
await Task.Delay(dots * 1000);
Console.WriteLine(" [x] Done");
// here channel could also be accessed as ((AsyncEventingBasicConsumer)sender).Channel
await channel.BasicAckAsync(deliveryTag: ea.DeliveryTag, multiple: false);
};
await channel.BasicConsumeAsync("task_queue", autoAck: false, consumer: consumer);
Console.WriteLine(" Press [enter] to exit.");
Console.ReadLine();

View File

@ -1,14 +0,0 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>net8.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="RabbitMQ.Client" Version="7.0.0" />
</ItemGroup>
</Project>

View File

@ -1,21 +0,0 @@
using RabbitMQ.Client;
using System.Text;
var factory = new ConnectionFactory { HostName = "localhost" };
using var connection = await factory.CreateConnectionAsync();
using var channel = await connection.CreateChannelAsync();
await channel.ExchangeDeclareAsync(exchange: "logs", type: ExchangeType.Fanout);
var message = GetMessage(args);
var body = Encoding.UTF8.GetBytes(message);
await channel.BasicPublishAsync(exchange: "logs", routingKey: string.Empty, body: body);
Console.WriteLine($" [x] Sent {message}");
Console.WriteLine(" Press [enter] to exit.");
Console.ReadLine();
static string GetMessage(string[] args)
{
return ((args.Length > 0) ? string.Join(" ", args) : "info: Hello World!");
}

View File

@ -1,14 +0,0 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>net8.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="RabbitMQ.Client" Version="7.0.0" />
</ItemGroup>
</Project>

View File

@ -1,31 +0,0 @@
using RabbitMQ.Client;
using RabbitMQ.Client.Events;
using System.Text;
var factory = new ConnectionFactory { HostName = "localhost" };
using var connection = await factory.CreateConnectionAsync();
using var channel = await connection.CreateChannelAsync();
await channel.ExchangeDeclareAsync(exchange: "logs",
type: ExchangeType.Fanout);
// declare a server-named queue
QueueDeclareOk queueDeclareResult = await channel.QueueDeclareAsync();
string queueName = queueDeclareResult.QueueName;
await channel.QueueBindAsync(queue: queueName, exchange: "logs", routingKey: string.Empty);
Console.WriteLine(" [*] Waiting for logs.");
var consumer = new AsyncEventingBasicConsumer(channel);
consumer.ReceivedAsync += (model, ea) =>
{
byte[] body = ea.Body.ToArray();
var message = Encoding.UTF8.GetString(body);
Console.WriteLine($" [x] {message}");
return Task.CompletedTask;
};
await channel.BasicConsumeAsync(queueName, autoAck: true, consumer: consumer);
Console.WriteLine(" Press [enter] to exit.");
Console.ReadLine();

View File

@ -1,14 +0,0 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>net8.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="RabbitMQ.Client" Version="7.0.0" />
</ItemGroup>
</Project>

View File

@ -1,61 +0,0 @@
services:
rabbitmq:
image: rabbitmq:management
container_name: rabbitmq
restart: always
ports:
- "5672:5672"
- "15672:15672"
environment:
RABBITMQ_DEFAULT_USER: admin
RABBITMQ_DEFAULT_PASS: admin
networks:
- my_network
publisher:
build:
context: ./Publisher
restart: always
depends_on:
- rabbitmq
environment:
RABBITMQ_HOST: rabbitmq
RABBIT_USERNAME: admin
RABBIT_PASSWORD: admin
RABBIT_EXCHANGE: 'logs_exchange'
networks:
- my_network
consumer1:
build:
context: ./Consumer1
restart: always
depends_on:
- rabbitmq
environment:
RABBITMQ_HOST: rabbitmq
RABBIT_USERNAME: admin
RABBIT_PASSWORD: admin
RABBIT_EXCHANGE: 'logs_exchange'
RABBIT_QUEUE: 'slow_queue'
networks:
- my_network
consumer2:
build:
context: ./Consumer2
restart: always
depends_on:
- rabbitmq
environment:
RABBITMQ_HOST: rabbitmq
RABBIT_USERNAME: admin
RABBIT_PASSWORD: admin
RABBIT_EXCHANGE: 'logs_exchange'
RABBIT_QUEUE: 'fast_queue'
networks:
- my_network
networks:
my_network:
driver: bridge

View File

@ -1,37 +0,0 @@
# Lab 4 - Working with a message broker
## PIbd-42 || Aleikin Artem
### Description
In this lab we got acquainted with the RabbitMQ message broker.
### Tutorials
1. HelloWorld - Tutorial
![Console output - first tutorial](./Images/Туториал_1.png)
2. Work Queues - Tutorial
![Console output - second tutorial](./Images/Туториал_2.png)
3. Publish/Subscribe - Tutorial
![Console output - third tutorial](./Images/Туториал_3.png)
### Main task
Three applications were developed: Publisher, Consumer1 and Consumer2.
The first one delivers messages to the queues, generating one message per second.
The second and third process those messages from their queues; Consumer1 has an artificial 2-3 second delay per message, while Consumer2 has no such limitation.
### Steps to run:
1. Start the containers:
```
docker-compose up -d
```
After that, the queue graphs can be viewed at http://localhost:15672/
![Consumer1 graph - slow](./Images/Лаба_Отчет2.png)
![Consumer2 graph - fast](./Images/Лаба_Отчет1.png)
After this, three more Consumer1 clients were added; only then was their combined throughput enough to keep up with the incoming messages.
![Consumer1 graph with several clients - slow](./Images/Лаба_Отчет3.png)
![Consumer2 graph - fast](./Images/Лаба_Отчет4.png)
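One way to reproduce the multi-consumer run is Docker Compose scaling (a sketch, assuming the compose service name `consumer1` from the compose file earlier in this diff and no fixed container_name for it):
```
docker-compose up -d --build --scale consumer1=4
```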
Demo video: https://vk.com/video248424990_456239611?list=ln-v0VkWDOiRBxdctENzV

BIN bazunov_andrew_lab_3/PersonApp/.DS_Store vendored, Normal file (binary not shown)

View File

@ -0,0 +1,4 @@
PORT=8080
TASK_APP_URL=http://task-app:8000
TIMEOUT=15
DATABASE=./database.db

View File

@ -0,0 +1,14 @@
FROM golang:1.23
WORKDIR /app
COPY go.mod go.sum ./
RUN go mod download
COPY . .
RUN go build -o /bin/PersonApp
EXPOSE 8080
CMD ["/bin/PersonApp"]

Binary file not shown.

View File

@ -0,0 +1,10 @@
module PersonApp
go 1.23.1
require (
github.com/gorilla/mux v1.8.1
github.com/mattn/go-sqlite3 v1.14.24
)
require github.com/joho/godotenv v1.5.1 // indirect

View File

@ -0,0 +1,6 @@
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM=
github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=

View File

@ -0,0 +1,157 @@
package handlers
import (
"PersonApp/httpClient"
"PersonApp/models"
"PersonApp/repository"
"encoding/json"
"fmt"
"github.com/gorilla/mux"
"net/http"
"strconv"
)
func InitRoutes(r *mux.Router, rep repository.PersonRepository, cln httpClient.Client) {
r.HandleFunc("/", GetPersons(rep, cln)).Methods("GET")
r.HandleFunc("/{id:[0-9]+}", GetPersonById(rep, cln)).Methods("GET")
r.HandleFunc("/", CreatePerson(rep)).Methods("POST")
r.HandleFunc("/{id:[0-9]+}", UpdatePerson(rep)).Methods("PUT")
r.HandleFunc("/{id:[0-9]+}", DeletePerson(rep)).Methods("DELETE")
}
func GetPersons(rep repository.PersonRepository, cln httpClient.Client) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
fmt.Println("GET PERSONS")
persons, err := rep.GetAllPersons()
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
for i := 0; i < len(persons); i++ {
tasks, _ := cln.GetPersonTasks(persons[i].Id)
persons[i].Tasks = tasks
}
err = json.NewEncoder(w).Encode(persons)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
}
func GetPersonById(rep repository.PersonRepository, cln httpClient.Client) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
id, err := strconv.Atoi(mux.Vars(r)["id"])
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
person, err := rep.GetPersonById(id)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
tasks, err := cln.GetPersonTasks(id)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
} else {
person.Tasks = tasks
}
err = json.NewEncoder(w).Encode(person)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
}
func CreatePerson(rep repository.PersonRepository) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
var person *models.Person
err := json.NewDecoder(r.Body).Decode(&person)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
person, err = rep.CreatePerson(*person)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusCreated)
err = json.NewEncoder(w).Encode(person)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
}
func UpdatePerson(rep repository.PersonRepository) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
id, err := strconv.Atoi(mux.Vars(r)["id"])
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
var person *models.Person
err = json.NewDecoder(r.Body).Decode(&person)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
person, err = rep.UpdatePerson(models.Person{
Id: id,
Name: person.Name,
Tasks: nil,
})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusAccepted)
err = json.NewEncoder(w).Encode(person)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
}
}
func DeletePerson(rep repository.PersonRepository) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
id, err := strconv.Atoi(mux.Vars(r)["id"])
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = rep.DeletePerson(id)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
}
}

View File

@ -0,0 +1,72 @@
package httpClient
import (
"PersonApp/models"
"encoding/json"
"fmt"
"io"
"net/http"
"time"
)
type Client interface {
GetPersonTasks(id int) ([]models.Task, error)
TestConnection() (bool, error)
}
type client struct {
BaseUrl string
Timeout time.Duration
}
func (c *client) TestConnection() (bool, error) {
client := &http.Client{Timeout: c.Timeout}
url := fmt.Sprintf("%s/", c.BaseUrl)
resp, err := client.Get(url)
if err != nil {
return false, err
}
defer func(Body io.ReadCloser) {
err := Body.Close()
if err != nil {
return
}
}(resp.Body)
if resp.StatusCode != http.StatusOK {
return false, fmt.Errorf("bad status code: %d", resp.StatusCode)
}
return true, nil
}
func (c *client) GetPersonTasks(id int) ([]models.Task, error) {
client := &http.Client{Timeout: c.Timeout * time.Second}
url := fmt.Sprintf("%s/f/%d", c.BaseUrl, id)
resp, err := client.Get(url)
if err != nil {
return nil, err
}
defer func(Body io.ReadCloser) {
err := Body.Close()
if err != nil {
}
}(resp.Body)
body, _ := io.ReadAll(resp.Body)
var tasks []models.Task
if err := json.Unmarshal(body, &tasks); err != nil {
fmt.Printf("Unmarshal error: %s", err)
return []models.Task{}, err
}
return tasks, nil
}
func NewClient(baseUrl string, timeout time.Duration) Client {
return &client{BaseUrl: baseUrl, Timeout: timeout}
}

View File

@ -0,0 +1,34 @@
GET http://localhost/person-app/
Accept: application/json
###
GET http://localhost/person-app/1
Accept: application/json
###
POST http://localhost/person-app/
Accept: application/json
Content-Type: application/json
{
"name": "TEST3"
}
###
PUT http://localhost/person-app/3
Accept: application/json
Content-Type: application/json
{
"name": "TEST11"
}
###
DELETE http://localhost/person-app/3
Accept: application/json
###

View File

@ -0,0 +1,47 @@
package main
import (
"PersonApp/handlers"
"PersonApp/httpClient"
"PersonApp/repository"
"PersonApp/storage"
"github.com/gorilla/mux"
"github.com/joho/godotenv"
"net/http"
"os"
"strconv"
"time"
)
func main() {
err := godotenv.Load(".env")
if err != nil {
panic("Error loading .env file")
}
url := os.Getenv("TASK_APP_URL")
port := os.Getenv("PORT")
databasePath := os.Getenv("DATABASE")
timeout, err := strconv.Atoi(os.Getenv("TIMEOUT"))
if err != nil {
panic("Error converting timeout to int")
}
database, err := storage.Init(databasePath)
if err != nil {
panic(err)
}
cln := httpClient.NewClient(url, time.Duration(timeout))
rep := repository.NewPersonRepository(database)
router := mux.NewRouter()
handlers.InitRoutes(router, rep, cln)
err = http.ListenAndServe(":"+port, router)
if err != nil {
panic(err)
}
storage.Close(database)
}

View File

@ -0,0 +1,24 @@
package models
type Person struct {
Id int `json:"id"`
Name string `json:"name"`
Tasks []Task `json:"tasks"`
}
type PersonCreate struct {
Name string `json:"name"`
}
type Task struct {
Id int `json:"id"`
PersonId int `json:"person_id"`
Name string `json:"name"`
Date string `json:"date"`
}
type TaskCreate struct {
PersonId int `json:"person_id"`
Name string `json:"name"`
Date string `json:"date"`
}

View File

@ -0,0 +1,99 @@
package repository
import (
"PersonApp/models"
"database/sql"
)
type PersonRepository interface {
GetAllPersons() ([]models.Person, error)
GetPersonById(id int) (*models.Person, error)
CreatePerson(person models.Person) (*models.Person, error)
UpdatePerson(person models.Person) (*models.Person, error)
DeletePerson(id int) error
}
type personRepository struct {
DB *sql.DB
}
func NewPersonRepository(db *sql.DB) PersonRepository {
return &personRepository{DB: db}
}
func (pr *personRepository) GetAllPersons() ([]models.Person, error) {
rows, err := pr.DB.Query("select * from Persons")
if err != nil {
return nil, err
}
defer func(rows *sql.Rows) {
err := rows.Close()
if err != nil {
panic(err)
}
}(rows)
var persons []models.Person
for rows.Next() {
p := models.Person{}
err := rows.Scan(&p.Id, &p.Name)
if err != nil {
panic(err)
}
persons = append(persons, p)
}
return persons, err
}
func (pr *personRepository) GetPersonById(id int) (*models.Person, error) {
row := pr.DB.QueryRow("select * from Persons where id=?", id)
person := models.Person{}
err := row.Scan(&person.Id, &person.Name)
if err != nil {
return nil, err
}
return &person, err
}
func (pr *personRepository) CreatePerson(p models.Person) (*models.Person, error) {
res, err := pr.DB.Exec("INSERT INTO Persons (name) values (?)", p.Name)
if err != nil {
return nil, err
}
if res == nil {
return nil, nil
}
return &p, err
}
func (pr *personRepository) UpdatePerson(p models.Person) (*models.Person, error) {
res, err := pr.DB.Exec("UPDATE Persons SET name = ? WHERE id = ?", p.Name, p.Id)
if err != nil {
return nil, err
}
if res == nil {
return nil, nil
}
return &p, err
}
func (pr *personRepository) DeletePerson(id int) error {
_, err := pr.DB.Exec("DELETE FROM Persons WHERE id = ?", id)
if err != nil {
return err
}
return nil
}

View File

@ -0,0 +1,36 @@
package storage
import (
"database/sql"
_ "github.com/mattn/go-sqlite3"
)
func Init(databasePath string) (*sql.DB, error) {
db, err := sql.Open("sqlite3", databasePath)
if err != nil || db == nil {
return nil, err
}
if err := createTableIfNotExists(db); err != nil {
return nil, err
}
return db, nil
}
func Close(db *sql.DB) {
err := db.Close()
if err != nil {
return
}
}
func createTableIfNotExists(db *sql.DB) error {
if result, err := db.Exec(
"CREATE TABLE IF NOT EXISTS `Persons`(Id integer primary key autoincrement, Name text not null);",
); err != nil || result == nil {
return err
}
return nil
}

View File

@ -1,11 +1,6 @@
# Distributed Computing and Applications, Lab 2
# Distributed Computing and Applications, Lab 3
## _Author: Bazunov Andrew Igorevich, PIbd-42_
Services (_listed in their execution order_):
- 1.FileCreator - (_creates test data_)
- 2.FirstService - (_implements variant 1.4 of the assignment_)
- 3.SecondService - (_implements variant 2.2 of the assignment_)
GoLang was chosen as the main language. A DOCKERFILE describing the build conditions and steps was created for each module.
# Docker
@ -27,4 +22,4 @@ docker-compose up -d --build
docker-compose down
```
[Demo video](https://vk.com/video236673313_456239575)
[Demo video](https://vk.com/video/@viltskaa?z=video236673313_456239577%2Fpl_236673313_-2)
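After `docker-compose up -d --build`, the stack can be smoke-tested through the nginx gateway (a sketch; the routes come from the nginx.conf later in this diff, and localhost port 80 is assumed):
```
curl http://localhost/person-app/   # persons served by PersonApp via nginx
curl http://localhost/task-app/     # tasks served by TaskApp via nginx
```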

View File

@ -0,0 +1,4 @@
PORT=8000
PERSON_APP_URL=http://person-app:8080
TIMEOUT=15
DATABASE=./database.db

View File

@ -0,0 +1,14 @@
FROM golang:1.23
WORKDIR /app
COPY go.mod go.sum ./
RUN go mod download
COPY . .
RUN go build -o /bin/TaskApp
EXPOSE 8000
CMD ["/bin/TaskApp"]

Binary file not shown.

View File

@ -0,0 +1,10 @@
module TaskApp
go 1.23.1
require (
github.com/gorilla/mux v1.8.1
github.com/mattn/go-sqlite3 v1.14.24
)
require github.com/joho/godotenv v1.5.1

View File

@ -0,0 +1,6 @@
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM=
github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=

View File

@ -0,0 +1,177 @@
package handlers
import (
"TaskApp/httpClient"
"TaskApp/models"
"TaskApp/repository"
"encoding/json"
"fmt"
"github.com/gorilla/mux"
"net/http"
"strconv"
)
func InitRoutes(r *mux.Router, rep repository.TaskRepository, cln httpClient.Client) {
r.HandleFunc("/", GetTasks(rep)).Methods("GET")
r.HandleFunc("/{id:[0-9]+}", GetTaskById(rep)).Methods("GET")
r.HandleFunc("/", CreateTask(rep, cln)).Methods("POST")
r.HandleFunc("/{id:[0-9]+}", UpdateTask(rep)).Methods("PUT")
r.HandleFunc("/{id:[0-9]+}", DeleteTask(rep)).Methods("DELETE")
r.HandleFunc("/f/{id:[0-9]+}", GetPersonTasks(rep)).Methods("GET")
}
func GetTasks(rep repository.TaskRepository) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
tasks, err := rep.GetAllTasks()
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
return
}
err = json.NewEncoder(w).Encode(tasks)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
}
}
}
func GetTaskById(rep repository.TaskRepository) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
id, err := strconv.Atoi(mux.Vars(r)["id"])
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
person, err := rep.GetTaskById(id)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = json.NewEncoder(w).Encode(person)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
}
func GetPersonTasks(rep repository.TaskRepository) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
id, err := strconv.Atoi(mux.Vars(r)["id"])
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
tasks, err := rep.GetUserTasks(id)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = json.NewEncoder(w).Encode(tasks)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
}
func CreateTask(rep repository.TaskRepository, cln httpClient.Client) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
var task *models.TaskCreate
err := json.NewDecoder(r.Body).Decode(&task)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// Validate the payload: the address of a struct field is never nil, so check the decoded values instead.
if task == nil || task.Name == "" {
http.Error(w, "task name is required", http.StatusBadRequest)
return
}
person, err := cln.GetPerson(task.PersonId)
if err != nil {
fmt.Println(err)
http.Error(w, "Connection to PersonApp failed.", http.StatusInternalServerError)
return
}
if person == nil {
// Report the requested id; dereferencing a nil person here would panic.
http.Error(w, fmt.Sprintf("Person with id=%d was not found.", task.PersonId), http.StatusBadGateway)
return
}
newTask, err := rep.CreateTask(*task)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusCreated)
err = json.NewEncoder(w).Encode(newTask)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
}
func UpdateTask(rep repository.TaskRepository) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
id, err := strconv.Atoi(mux.Vars(r)["id"])
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
var task *models.TaskCreate
err = json.NewDecoder(r.Body).Decode(&task)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
newTask, err := rep.UpdateTask(models.Task{Id: id, Name: task.Name, Date: task.Date})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = json.NewEncoder(w).Encode(newTask)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
}
}
func DeleteTask(rep repository.TaskRepository) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
id, err := strconv.Atoi(mux.Vars(r)["id"])
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
err = rep.DeleteTask(id)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
}
}

View File

@ -0,0 +1,73 @@
package httpClient
import (
"TaskApp/models"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"time"
)
type Client interface {
GetPerson(id int) (*models.Person, error)
TestConnection() (bool, error)
}
type client struct {
BaseUrl string
Timeout time.Duration
}
func (c *client) TestConnection() (bool, error) {
client := &http.Client{Timeout: c.Timeout}
url := fmt.Sprintf("%s/", c.BaseUrl)
resp, err := client.Get(url)
if err != nil {
return false, err
}
defer func(Body io.ReadCloser) {
err := Body.Close()
if err != nil {
return
}
}(resp.Body)
if resp.StatusCode != http.StatusOK {
return false, fmt.Errorf("bad status code: %d", resp.StatusCode)
}
return true, nil
}
func (c *client) GetPerson(id int) (*models.Person, error) {
client := &http.Client{Timeout: c.Timeout * time.Second}
url := fmt.Sprintf("%s/%d", c.BaseUrl, id)
resp, err := client.Get(url)
if err != nil {
return nil, err
}
defer func(Body io.ReadCloser) {
err := Body.Close()
if err != nil {
}
}(resp.Body)
body, _ := io.ReadAll(resp.Body)
var person models.Person
if err := json.Unmarshal(body, &person); err != nil {
log.Printf("Unmarshal error: %s", err)
return nil, err
}
return &person, nil
}
func NewClient(baseUrl string, timeout time.Duration) Client {
return &client{BaseUrl: baseUrl, Timeout: timeout}
}

View File

@ -0,0 +1,37 @@
GET http://localhost/task-app/
Accept: application/json
###
GET http://localhost/task-app/4
Accept: application/json
###
POST http://localhost/task-app/
Accept: application/json
Content-Type: application/json
{
"name": "TEST2",
"person_id": 1,
"date": "19.02.2202"
}
###
PUT http://localhost/task-app/4
Accept: application/json
Content-Type: application/json
{
"name": "TEST5",
"date": "19.02.2202"
}
###
DELETE http://localhost/task-app/4
Accept: application/json
###

View File

@ -0,0 +1,47 @@
package main
import (
"TaskApp/handlers"
"TaskApp/httpClient"
"TaskApp/repository"
"TaskApp/storage"
"github.com/gorilla/mux"
"github.com/joho/godotenv"
"net/http"
"os"
"strconv"
"time"
)
func main() {
err := godotenv.Load(".env")
if err != nil {
panic("Error loading .env file")
}
url := os.Getenv("PERSON_APP_URL")
port := os.Getenv("PORT")
databasePath := os.Getenv("DATABASE")
timeout, err := strconv.Atoi(os.Getenv("TIMEOUT"))
if err != nil {
panic("Error converting timeout to int")
}
database, err := storage.Init(databasePath)
if err != nil {
panic(err)
}
cln := httpClient.NewClient(url, time.Duration(timeout))
rep := repository.NewTaskRepository(database)
router := mux.NewRouter()
handlers.InitRoutes(router, rep, cln)
err = http.ListenAndServe(":"+port, router)
if err != nil {
panic(err)
}
storage.Close(database)
}

View File

@ -0,0 +1,24 @@
package models
type Person struct {
Id int `json:"id"`
Name string `json:"name"`
Tasks []Task `json:"tasks"`
}
type PersonCreate struct {
Name string `json:"name"`
}
type Task struct {
Id int `json:"id"`
PersonId int `json:"person_id"`
Name string `json:"name"`
Date string `json:"date"`
}
type TaskCreate struct {
PersonId int `json:"person_id"`
Name string `json:"name"`
Date string `json:"date"`
}

View File

@ -0,0 +1,121 @@
package repository
import (
"TaskApp/models"
"database/sql"
)
type TaskRepository interface {
GetAllTasks() ([]models.Task, error)
GetTaskById(id int) (*models.Task, error)
GetUserTasks(id int) ([]models.Task, error)
CreateTask(task models.TaskCreate) (*models.Task, error)
UpdateTask(task models.Task) (*models.Task, error)
DeleteTask(id int) error
}
type taskRepository struct {
DB *sql.DB
}
func (t taskRepository) GetUserTasks(id int) ([]models.Task, error) {
rows, err := t.DB.Query("select * from Tasks where PersonId = ?", id)
if err != nil {
return nil, err
}
defer func(rows *sql.Rows) {
err := rows.Close()
if err != nil {
panic(err)
}
}(rows)
var tasks []models.Task
for rows.Next() {
p := models.Task{}
err := rows.Scan(&p.Id, &p.Name, &p.PersonId, &p.Date)
if err != nil {
panic(err)
}
tasks = append(tasks, p)
}
return tasks, err
}
func (t taskRepository) GetAllTasks() ([]models.Task, error) {
rows, err := t.DB.Query("select * from Tasks")
if err != nil {
return nil, err
}
defer func(rows *sql.Rows) {
err := rows.Close()
if err != nil {
panic(err)
}
}(rows)
var tasks []models.Task
for rows.Next() {
p := models.Task{}
err := rows.Scan(&p.Id, &p.Name, &p.PersonId, &p.Date)
if err != nil {
panic(err)
}
tasks = append(tasks, p)
}
return tasks, err
}
func (t taskRepository) GetTaskById(id int) (*models.Task, error) {
row := t.DB.QueryRow("select * from Tasks where id=?", id)
task := models.Task{}
err := row.Scan(&task.Id, &task.Name, &task.PersonId, &task.Date)
if err != nil {
return nil, err
}
return &task, err
}
func (t taskRepository) CreateTask(task models.TaskCreate) (*models.Task, error) {
_, err := t.DB.Exec("INSERT INTO Tasks(Name, PersonId, Date) VALUES (?, ?, ?)", task.Name, task.PersonId, task.Date)
if err != nil {
return nil, err
}
return &models.Task{
Id: 0,
PersonId: task.PersonId,
Name: task.Name,
Date: task.Date,
}, err
}
func (t taskRepository) UpdateTask(task models.Task) (*models.Task, error) {
_, err := t.DB.Exec("UPDATE Tasks SET name = ?, date = ? WHERE id = ?", task.Name, task.Date, task.Id)
if err != nil {
return nil, err
}
return &task, err
}
func (t taskRepository) DeleteTask(id int) error {
_, err := t.DB.Exec("DELETE FROM Tasks WHERE id = ?", id)
if err != nil {
return err
}
return nil
}
func NewTaskRepository(db *sql.DB) TaskRepository {
return &taskRepository{DB: db}
}

View File

@ -0,0 +1,36 @@
package storage
import (
"database/sql"
_ "github.com/mattn/go-sqlite3"
)
func Init(databasePath string) (*sql.DB, error) {
db, err := sql.Open("sqlite3", databasePath)
if err != nil || db == nil {
return nil, err
}
if err := createTableIfNotExists(db); err != nil {
return nil, err
}
return db, nil
}
func Close(db *sql.DB) {
err := db.Close()
if err != nil {
return
}
}
func createTableIfNotExists(db *sql.DB) error {
if result, err := db.Exec(
"CREATE TABLE IF NOT EXISTS `Tasks`(Id integer primary key autoincrement, Name text not null, PersonId integer not null, Date text not null);",
); err != nil || result == nil {
return err
}
return nil
}

View File

@ -0,0 +1,34 @@
services:
person-app:
build:
context: ./PersonApp
dockerfile: Dockerfile
networks:
- network
ports:
- "8080:8080"
task-app:
build:
context: ./TaskApp
dockerfile: Dockerfile
networks:
- network
ports:
- "8000:8000"
nginx:
image: nginx
ports:
- "80:80"
volumes:
- ./nginx.conf:/etc/nginx/nginx.conf
networks:
- network
depends_on:
- person-app
- task-app
networks:
network:
driver: bridge

View File

@ -0,0 +1,59 @@
events {
worker_connections 1024;
}
http {
server {
listen 80;
server_name localhost;
location /person-app/ {
proxy_pass http://person-app:8080/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
add_header 'Access-Control-Allow-Origin' '*';
add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
add_header 'Access-Control-Allow-Headers' 'Origin, Content-Type, Accept, Authorization';
}
location /task-app/ {
proxy_pass http://task-app:8000/;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
add_header 'Access-Control-Allow-Origin' '*';
add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
add_header 'Access-Control-Allow-Headers' 'Origin, Content-Type, Accept, Authorization';
}
# Proxy for Swagger (Stream service)
#location /stream-service/swagger/ {
# proxy_pass http://stream-service:8000/swagger/;
# proxy_set_header Host $host;
# proxy_set_header X-Real-IP $remote_addr;
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# proxy_set_header X-Forwarded-Proto $scheme;
#}
# Proxy for Swagger (Message service)
#location /message-service/swagger/ {
# proxy_pass http://message-service:8080/swagger/;
# proxy_set_header Host $host;
# proxy_set_header X-Real-IP $remote_addr;
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# proxy_set_header X-Forwarded-Proto $scheme;
#}
#location /stream-service/doc.json {
# proxy_pass http://stream-service:8000/doc.json;
#}
#location /message-service/doc.json {
# proxy_pass http://message-service:8080/doc.json;
#}
}
}

View File

@ -0,0 +1,34 @@
# Lab 4: Working with a message broker (RabbitMQ)
## Goal
Learn to design applications that use the RabbitMQ message broker.
---
## Tasks
> 1. **Install RabbitMQ**
Install RabbitMQ on your local machine (or use Docker).
>- [RabbitMQ downloads](https://www.rabbitmq.com/download.html)
>- [RabbitMQ releases](https://github.com/rabbitmq/rabbitmq-server/releases/)
>- **Work through the RabbitMQ tutorials**
>- Take screenshots showing the `producer` and `consumer` running and messages being exchanged.
---
## First tutorial
> ![img.png](static/img1.png)
---
## Second tutorial
>![img.png](static/img2.png)
>![img_1.png](static/img3.png)
---
## Third tutorial
> ![img.png](static/img4.png)
---
## Task
>![img.png](static/img5.png)
> ![img.png](static/img.png)
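A minimal way to run the tutorial scripts against the RabbitMQ container from the compose file below (a sketch; the script filenames here are assumptions, substitute the actual names used in this lab):
```
pip install pika        # client library used by all scripts in this lab
docker-compose up -d    # starts RabbitMQ with the management UI on :15672
python receive.py       # assumed consumer filename (terminal 1)
python send.py          # assumed producer filename (terminal 2)
```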

View File

@ -0,0 +1,17 @@
version: "3.2"
services:
rabbitmq:
image: rabbitmq:3-management-alpine
container_name: 'rabbitmq'
ports:
- "5672:5672"
- "15672:15672"
volumes:
- ~/.docker-conf/rabbitmq/data/:/var/lib/rabbitmq/
- ~/.docker-conf/rabbitmq/log/:/var/log/rabbitmq
networks:
- rabbitmq_go_net
networks:
rabbitmq_go_net:
driver: bridge

View File

@ -0,0 +1,47 @@
from datetime import datetime
import random
import threading
import pika
import sys
_alphabet = [chr(i) for i in range(97, 123)]
def run_every_n_seconds(seconds, action, *args):
threading.Timer(seconds, run_every_n_seconds, [seconds, action] + list(args)).start()
action(*args)
def generate_message():
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
return f"[{current_time}] " + "".join(random.choices(_alphabet, k=random.randint(1, 10)))
def send_message(channel_local):
message = generate_message()
channel_local.basic_publish(
exchange='vk_messages',
routing_key='vk_messages',
body=message,
properties=pika.BasicProperties(
delivery_mode=pika.DeliveryMode.Persistent
))
print(f"[vkAuthor] Sent {message}")
def main(conn: pika.BlockingConnection):
channel = conn.channel()
channel.exchange_declare(exchange='vk_messages', exchange_type='fanout')
run_every_n_seconds(1, send_message, channel)
if __name__ == '__main__':
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
try:
main(connection)
except KeyboardInterrupt:
connection.close()
sys.exit(0)

View File

@ -0,0 +1,44 @@
import sys
from datetime import datetime
import pika
_QUEUE_NAME = "vk_messages_queue"
_EXCHANGE_NAME = "vk_messages"
def main():
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.exchange_declare(
exchange=_EXCHANGE_NAME,
exchange_type='fanout'
)
channel.queue_declare(queue=_QUEUE_NAME, exclusive=True)
channel.queue_bind(exchange=_EXCHANGE_NAME, queue=_QUEUE_NAME)
def callback(ch, method, properties, body):
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
print(f"[vkReader] Received [{str(body)}] in [{current_time}]")
ch.basic_ack(delivery_tag=method.delivery_tag)
channel.basic_consume(
queue=_QUEUE_NAME,
on_message_callback=callback,
auto_ack=False
)
print('[*] Waiting for messages. To exit press CTRL+C')
channel.start_consuming()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('Interrupted')
sys.exit(0)

View File

@ -0,0 +1,47 @@
import time
import random
from datetime import datetime
import pika
import sys
_QUEUE_NAME = "vk_messages_queue_slow"
_EXCHANGE_NAME = "vk_messages"
def main():
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.exchange_declare(
exchange=_EXCHANGE_NAME,
exchange_type='fanout'
)
channel.queue_declare(queue=_QUEUE_NAME, exclusive=True)
channel.queue_bind(exchange=_EXCHANGE_NAME, queue=_QUEUE_NAME)
def callback(ch, method, properties, body):
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
print(f"[vkSlowReader] Received [{str(body)}] in [{current_time}]")
read_time = random.randint(2, 5)
time.sleep(read_time)
ch.basic_ack(delivery_tag=method.delivery_tag)
channel.basic_consume(
queue=_QUEUE_NAME,
on_message_callback=callback,
auto_ack=False
)
print('[*] Waiting for messages. To exit press CTRL+C')
channel.start_consuming()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('Interrupted')
sys.exit(0)

View File

@ -0,0 +1,25 @@
import pika
import sys
def main():
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='hello')
def callback(ch, method, properties, body):
print(f" [x] Received {body}")
channel.basic_consume(queue='hello', on_message_callback=callback, auto_ack=True)
print(' [*] Waiting for messages. To exit press CTRL+C')
channel.start_consuming()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('Interrupted')
sys.exit(0)

View File

@ -0,0 +1,11 @@
import pika

connection = pika.BlockingConnection(
    pika.ConnectionParameters(host='localhost'))
channel = connection.channel()

channel.queue_declare(queue='hello')

channel.basic_publish(exchange='', routing_key='hello', body='Hello World!')
print(" [x] Sent 'Hello World!'")

connection.close()

View File

@ -0,0 +1,19 @@
import pika
import sys

connection = pika.BlockingConnection(
    pika.ConnectionParameters(host='localhost'))
channel = connection.channel()

channel.queue_declare(queue='task_queue', durable=True)

message = ' '.join(sys.argv[1:]) or "Hello World!"
channel.basic_publish(
    exchange='',
    routing_key='task_queue',
    body=message,
    properties=pika.BasicProperties(
        delivery_mode=pika.DeliveryMode.Persistent
    ))
print(f" [x] Sent {message}")
connection.close()

View File

@ -0,0 +1,22 @@
import pika
import time

connection = pika.BlockingConnection(
    pika.ConnectionParameters(host='localhost'))
channel = connection.channel()

channel.queue_declare(queue='task_queue', durable=True)
print(' [*] Waiting for messages. To exit press CTRL+C')


def callback(ch, method, properties, body):
    print(f" [x] Received {body.decode()}")
    time.sleep(body.count(b'.'))
    print(" [x] Done")
    ch.basic_ack(delivery_tag=method.delivery_tag)


channel.basic_qos(prefetch_count=1)
channel.basic_consume(queue='task_queue', on_message_callback=callback)

channel.start_consuming()

Binary file not shown (image added, 35 KiB).
Binary file not shown (image added, 37 KiB).
Binary file not shown (image added, 14 KiB).
Binary file not shown (image added, 24 KiB).
Binary file not shown (image added, 29 KiB).
Binary file not shown (image added, 204 KiB).

View File

@ -0,0 +1,13 @@
import pika
import sys

connection = pika.BlockingConnection(
    pika.ConnectionParameters(host='localhost'))
channel = connection.channel()

channel.exchange_declare(exchange='logs', exchange_type='fanout')

message = ' '.join(sys.argv[1:]) or "info: Hello World!"
channel.basic_publish(exchange='logs', routing_key='', body=message)
print(f" [x] Sent {message}")
connection.close()

View File

@ -0,0 +1,24 @@
import pika

connection = pika.BlockingConnection(
    pika.ConnectionParameters(host='localhost'))
channel = connection.channel()

channel.exchange_declare(exchange='logs', exchange_type='fanout')

result = channel.queue_declare(queue='', exclusive=True)
queue_name = result.method.queue

channel.queue_bind(exchange='logs', queue=queue_name)

print(' [*] Waiting for logs. To exit press CTRL+C')


def callback(ch, method, properties, body):
    print(f" [x] {body}")


channel.basic_consume(
    queue=queue_name, on_message_callback=callback, auto_ack=True)

channel.start_consuming()

View File

@ -0,0 +1,30 @@
import pika
import time


def callback(ch, method, properties, body):
    print(f'Consumer 1 received a message: {body.decode()}')
    # Processing delay required by the assignment
    time.sleep(2)
    print('Consumer 1 finished processing')


def consume_events_1():
    connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
    channel = connection.channel()

    # Declare the queue
    channel.queue_declare(queue='consumer1_queue')
    # Bind the queue to the exchange
    channel.queue_bind(exchange='beauty_salon_events', queue='consumer1_queue')

    channel.basic_consume(queue='consumer1_queue', on_message_callback=callback, auto_ack=True)
    print('Consumer 1 started waiting for messages...')
    channel.start_consuming()


if __name__ == "__main__":
    consume_events_1()

View File

@ -0,0 +1,28 @@
import pika


def callback(ch, method, properties, body):
    print(f'Consumer 2 received a message: {body.decode()}')
    # Non-stop processing, no artificial delay
    print('Consumer 2 finished processing')


def consume_events_2():
    connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
    channel = connection.channel()

    # Declare the queue
    channel.queue_declare(queue='consumer2_queue')
    # Bind the queue to the exchange
    channel.queue_bind(exchange='beauty_salon_events', queue='consumer2_queue')

    channel.basic_consume(queue='consumer2_queue', on_message_callback=callback, auto_ack=True)
    print('Consumer 2 started waiting for messages...')
    channel.start_consuming()


if __name__ == "__main__":
    consume_events_2()

View File

@ -0,0 +1,56 @@
### Lab 4 - Working with a Message Broker
#### Task
1. Install the RabbitMQ message broker.
2. Complete lessons 1, 2 and 3 of the RabbitMQ Tutorials in any programming language.
3. Demonstrate the message broker in operation.
#### How the program works
- **The Publisher class** sends messages to its clients (a minimal sketch of the publisher side is given right after this list).
- **The Consumer1 class** receives messages and processes each one with a 3-second delay, which can be seen in the video.
- **The Consumer2 class** receives and processes messages instantly.
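
The publisher script itself is not included in this diff. The sketch below is only an illustration of what it might look like: the `beauty_salon_events` fanout exchange name is taken from the consumer scripts shown earlier, while the message text and the one-second send interval are assumptions made for the example.

```python
import time

import pika

# Hypothetical publisher sketch: only the 'beauty_salon_events' exchange name
# comes from the consumer code; everything else is assumed for illustration.
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()

# Declare the fanout exchange that the consumers bind their queues to.
channel.exchange_declare(exchange='beauty_salon_events', exchange_type='fanout')

try:
    i = 0
    while True:
        message = f'salon event #{i}'
        # A fanout exchange ignores the routing key, so it is left empty.
        channel.basic_publish(exchange='beauty_salon_events', routing_key='', body=message)
        print(f'Publisher sent: {message}')
        i += 1
        time.sleep(1)
except KeyboardInterrupt:
    connection.close()
```

Because the exchange is a fanout, every bound queue receives its own copy of each message, so the artificial delay in Consumer1 does not slow Consumer2 down.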
#### Lessons
1. lesson_1
![lesson_1.png](lesson_1.png)
2. lesson_2
![lesson_2.png](lesson_2.png)
3. lesson_3
![lesson_3.png](lesson_3.png)
## Working with the RabbitMQ Management UI
![img_3.png](img_3.png)
## queue_1 readings with one running Consumer_1 instance
![img.png](img.png)
## queue_2 readings
![img_1.png](img_1.png)
## queue_1 readings with two running Consumer_1 instances
![img_2.png](img_2.png)
## queue_1 readings with three running Consumer_1 instances
![img_4.png](img_4.png)
## Task Manager
![img_5.png](img_5.png)
## Video
https://vk.com/video64471408_456239207?list=ln-HGhG4o92uxLaxnsLRj

BIN kadyrov_aydar_lab_4/img.png (new file): binary not shown (image added, 24 KiB).
Binary file not shown (image added, 21 KiB).
Binary file not shown (image added, 6.3 KiB).
Binary file not shown (image added, 32 KiB).
Binary file not shown (image added, 6.3 KiB).
Binary file not shown (image added, 68 KiB).
Binary file not shown (image added, 35 KiB).

View File

@ -0,0 +1,25 @@
import pika, sys, os


def main():
    connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
    channel = connection.channel()

    channel.queue_declare(queue='hello')

    def callback(ch, method, properties, body):
        print(f" [x] Received {body}")

    channel.basic_consume(queue='hello', on_message_callback=callback, auto_ack=True)

    print(' [*] Waiting for messages. To exit press CTRL+C')
    channel.start_consuming()


if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        print('Interrupted')
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)

View File

@ -0,0 +1,11 @@
import pika

connection = pika.BlockingConnection(
    pika.ConnectionParameters(host='localhost'))
channel = connection.channel()

channel.queue_declare(queue='hello')

channel.basic_publish(exchange='', routing_key='hello', body='Hello World!')
print(" [x] Sent 'Hello World!'")

connection.close()

Binary file not shown (image added, 36 KiB).

View File

@ -0,0 +1,19 @@
import pika
import sys

connection = pika.BlockingConnection(
    pika.ConnectionParameters(host='localhost'))
channel = connection.channel()

channel.queue_declare(queue='task_queue', durable=True)

message = ' '.join(sys.argv[1:]) or "Hello World!"
channel.basic_publish(
    exchange='',
    routing_key='task_queue',
    body=message,
    properties=pika.BasicProperties(
        delivery_mode=pika.DeliveryMode.Persistent
    ))
print(f" [x] Sent {message}")
connection.close()

View File

@ -0,0 +1,23 @@
#!/usr/bin/env python
import pika
import time

connection = pika.BlockingConnection(
    pika.ConnectionParameters(host='localhost'))
channel = connection.channel()

channel.queue_declare(queue='task_queue', durable=True)
print(' [*] Waiting for messages. To exit press CTRL+C')


def callback(ch, method, properties, body):
    print(f" [x] Received {body.decode()}")
    time.sleep(body.count(b'.'))
    print(" [x] Done")
    ch.basic_ack(delivery_tag=method.delivery_tag)


channel.basic_qos(prefetch_count=1)
channel.basic_consume(queue='task_queue', on_message_callback=callback)

channel.start_consuming()

Binary file not shown (image added, 35 KiB).

Some files were not shown because too many files have changed in this diff.