import time
import multiprocessing

import numpy as np


def determinant_sequential(matrix):
    """Compute the determinant by Gaussian elimination with partial pivoting."""
    n = len(matrix)
    mat = np.copy(matrix)
    sign = 1  # track sign changes caused by row swaps

    for i in range(n):
        # Partial pivoting: pick the row with the largest entry in column i.
        max_row = i
        for k in range(i + 1, n):
            if abs(mat[k][i]) > abs(mat[max_row][i]):
                max_row = k

        # A (near-)zero pivot means the matrix is singular.
        if abs(mat[max_row][i]) < 1e-9:
            return 0

        if max_row != i:
            mat[[i, max_row]] = mat[[max_row, i]]
            sign = -sign  # each row swap flips the determinant's sign

        # Eliminate the entries below the pivot.
        for k in range(i + 1, n):
            factor = mat[k][i] / mat[i][i]
            for j in range(i, n):
                mat[k][j] -= factor * mat[i][j]

    # The determinant is the product of the diagonal of the triangular matrix,
    # corrected for the row swaps performed above.
    det = sign
    for i in range(n):
        det *= mat[i][i]
    return det


def determinant_parallel_worker(matrix_part, row_indices):
    """Run Gaussian elimination on one horizontal block of rows.

    Returns the product of the block's pivots together with the (possibly
    permuted) global row indices of the block.
    """
    n_part = len(matrix_part)
    local_matrix = np.copy(matrix_part)
    local_row_indices = np.copy(row_indices)
    sign = 1  # track row swaps inside the block

    for i in range(n_part):
        # Partial pivoting within the block.
        max_row = i
        for k in range(i + 1, n_part):
            if abs(local_matrix[k][i]) > abs(local_matrix[max_row][i]):
                max_row = k

        if abs(local_matrix[max_row][i]) < 1e-9:
            return 0, local_row_indices

        if max_row != i:
            local_matrix[[i, max_row]] = local_matrix[[max_row, i]]
            local_row_indices[[i, max_row]] = local_row_indices[[max_row, i]]
            sign = -sign

        # Eliminate the entries below the pivot within the block.
        for k in range(i + 1, n_part):
            factor = local_matrix[k][i] / local_matrix[i][i]
            for j in range(i, n_part):
                local_matrix[k][j] -= factor * local_matrix[i][j]

    det_part = sign
    for i in range(n_part):
        det_part *= local_matrix[i][i]
    return det_part, local_row_indices


def determinant_parallel(matrix, num_threads):
    """Split the matrix into horizontal blocks of rows, eliminate each block
    in a separate worker process, and multiply the partial results."""
    n = len(matrix)
    if n == 1:
        return matrix[0][0]

    if num_threads > n:
        num_threads = n

    chunk_size = n // num_threads

    with multiprocessing.Pool(processes=num_threads) as pool:
        results = []
        row_indices = np.arange(n)
        for i in range(num_threads):
            start_row = i * chunk_size
            # The last worker also receives any remainder rows.
            end_row = (i + 1) * chunk_size if i < num_threads - 1 else n
            res = pool.apply_async(determinant_parallel_worker,
                                   (matrix[start_row:end_row, :],
                                    row_indices[start_row:end_row]))
            results.append(res)

        partial_dets = []
        for res in results:
            det_part, row_indices_part = res.get()
            partial_dets.append(det_part)

    final_det = 1
    for det_part in partial_dets:
        final_det *= det_part

    return final_det


def calculate_determinant(matrix, num_threads):
    if num_threads == 1:
        return determinant_sequential(matrix)
    else:
        return determinant_parallel(matrix, num_threads)


if __name__ == '__main__':
    sizes = [100, 300, 500]
    num_threads_list = [1, 2, 4]

    for size in sizes:
        matrix = np.random.rand(size, size)
        for num_threads in num_threads_list:
            start_time = time.time()
            if num_threads == 1:
                calculation_method = "Sequential"
            else:
                calculation_method = f"Parallel ({num_threads} threads)"
            det = calculate_determinant(matrix, num_threads)
            end_time = time.time()
            print(
                f"Matrix size: {size}x{size}, Method: {calculation_method}, "
                f"Time: {end_time - start_time:.4f} s")