From ad8fcdad5be6eb72829edec4ea43bda260ef725b Mon Sep 17 00:00:00 2001
From: oraqlle <41113853+oraqlle@users.noreply.github.com>
Date: Sun, 16 Apr 2023 19:56:05 +1000
Subject: [PATCH] Fixed image folder location for distrib.-comp. chapter.
 Bumped version

---
 src/chapter5/challenges.md                        |  2 +-
 src/chapter5/distributed-computing.md             |  6 +++---
 src/{ => chapter5}/imgs/data_parallelism.jpg      | Bin
 .../imgs/distributed_memory_architecture.png      | Bin
 .../imgs/distributed_memory_architecture_2.png    | Bin
 src/{ => chapter5}/imgs/distributed_vs_shared.png | Bin
 src/{ => chapter5}/imgs/htop.png                  | Bin
 src/{ => chapter5}/imgs/memory_architectures.jpg  | Bin
 src/{ => chapter5}/imgs/mpi_datatypes.png         | Bin
 src/{ => chapter5}/imgs/mpi_routines.png          | Bin
 .../imgs/parallel_computing_arrays_eg.png         | Bin
 src/{ => chapter5}/imgs/parallel_scalability.jpg  | Bin
 src/{ => chapter5}/imgs/ping_pong.png             | Bin
 src/{ => chapter5}/imgs/task_parallelism.jpg      | Bin
 src/{ => chapter5}/imgs/time.png                  | Bin
 src/chapter5/openmpi.md                           |  6 +++---
 src/chapter5/parallel-refresher.md                | 10 +++++-----
 src/version.md                                    |  2 +-
 18 files changed, 13 insertions(+), 13 deletions(-)
 rename src/{ => chapter5}/imgs/data_parallelism.jpg (100%)
 rename src/{ => chapter5}/imgs/distributed_memory_architecture.png (100%)
 rename src/{ => chapter5}/imgs/distributed_memory_architecture_2.png (100%)
 rename src/{ => chapter5}/imgs/distributed_vs_shared.png (100%)
 rename src/{ => chapter5}/imgs/htop.png (100%)
 rename src/{ => chapter5}/imgs/memory_architectures.jpg (100%)
 rename src/{ => chapter5}/imgs/mpi_datatypes.png (100%)
 rename src/{ => chapter5}/imgs/mpi_routines.png (100%)
 rename src/{ => chapter5}/imgs/parallel_computing_arrays_eg.png (100%)
 rename src/{ => chapter5}/imgs/parallel_scalability.jpg (100%)
 rename src/{ => chapter5}/imgs/ping_pong.png (100%)
 rename src/{ => chapter5}/imgs/task_parallelism.jpg (100%)
 rename src/{ => chapter5}/imgs/time.png (100%)

diff --git a/src/chapter5/challenges.md b/src/chapter5/challenges.md
index 8a6fc32..cc62473 100644
--- a/src/chapter5/challenges.md
+++ b/src/chapter5/challenges.md
@@ -25,7 +25,7 @@
 Output should be similar to this. May be slightly different due to process
 scheduling
 
-![Ping pong](../imgs/ping_pong.png)
+![Ping pong](imgs/ping_pong.png)
 
 ## Task 3: Monte Carlo
 
diff --git a/src/chapter5/distributed-computing.md b/src/chapter5/distributed-computing.md
index 84ba238..7aa688e 100644
--- a/src/chapter5/distributed-computing.md
+++ b/src/chapter5/distributed-computing.md
@@ -4,7 +4,7 @@
 This essentially means it is a form of parallel computing, where the processing power is spread across multiple machines in a network rather than being contained within a single system.
 In this memory architecture, the problems are broken down into smaller parts, and each machine is assigned to work on a specific part.
 
-![distributed memory architecture](../imgs/distributed_memory_architecture.png)
+![distributed memory architecture](imgs/distributed_memory_architecture.png)
 
 ## Distributed Memory Architecture
 
@@ -13,13 +13,13 @@ Lets have a look at the distributed memory architecture in more details.
 - Each processor has its own local memory, with its own address space
 - Data is shared via a communications network using a network protocol, e.g Transmission Control Protocol (TCP), Infiniband etc..
 
-![Distributed Memory Architecture](../imgs/distributed_memory_architecture_2.png)
+![Distributed Memory Architecture](imgs/distributed_memory_architecture_2.png)
 
 ## Distributed vs Shared program execution
 
 The following diagram provides another way of looking at the differences between distributed and shared memory architecture and their program execution.
 
-![Distributed vs Shared](../imgs/distributed_vs_shared.png)
+![Distributed vs Shared](imgs/distributed_vs_shared.png)
 
 ## Advantages of distributed computing
diff --git a/src/imgs/data_parallelism.jpg b/src/chapter5/imgs/data_parallelism.jpg
similarity index 100%
rename from src/imgs/data_parallelism.jpg
rename to src/chapter5/imgs/data_parallelism.jpg
diff --git a/src/imgs/distributed_memory_architecture.png b/src/chapter5/imgs/distributed_memory_architecture.png
similarity index 100%
rename from src/imgs/distributed_memory_architecture.png
rename to src/chapter5/imgs/distributed_memory_architecture.png
diff --git a/src/imgs/distributed_memory_architecture_2.png b/src/chapter5/imgs/distributed_memory_architecture_2.png
similarity index 100%
rename from src/imgs/distributed_memory_architecture_2.png
rename to src/chapter5/imgs/distributed_memory_architecture_2.png
diff --git a/src/imgs/distributed_vs_shared.png b/src/chapter5/imgs/distributed_vs_shared.png
similarity index 100%
rename from src/imgs/distributed_vs_shared.png
rename to src/chapter5/imgs/distributed_vs_shared.png
diff --git a/src/imgs/htop.png b/src/chapter5/imgs/htop.png
similarity index 100%
rename from src/imgs/htop.png
rename to src/chapter5/imgs/htop.png
diff --git a/src/imgs/memory_architectures.jpg b/src/chapter5/imgs/memory_architectures.jpg
similarity index 100%
rename from src/imgs/memory_architectures.jpg
rename to src/chapter5/imgs/memory_architectures.jpg
diff --git a/src/imgs/mpi_datatypes.png b/src/chapter5/imgs/mpi_datatypes.png
similarity index 100%
rename from src/imgs/mpi_datatypes.png
rename to src/chapter5/imgs/mpi_datatypes.png
diff --git a/src/imgs/mpi_routines.png b/src/chapter5/imgs/mpi_routines.png
similarity index 100%
rename from src/imgs/mpi_routines.png
rename to src/chapter5/imgs/mpi_routines.png
diff --git a/src/imgs/parallel_computing_arrays_eg.png b/src/chapter5/imgs/parallel_computing_arrays_eg.png
similarity index 100%
rename from src/imgs/parallel_computing_arrays_eg.png
rename to src/chapter5/imgs/parallel_computing_arrays_eg.png
diff --git a/src/imgs/parallel_scalability.jpg b/src/chapter5/imgs/parallel_scalability.jpg
similarity index 100%
rename from src/imgs/parallel_scalability.jpg
rename to src/chapter5/imgs/parallel_scalability.jpg
diff --git a/src/imgs/ping_pong.png b/src/chapter5/imgs/ping_pong.png
similarity index 100%
rename from src/imgs/ping_pong.png
rename to src/chapter5/imgs/ping_pong.png
diff --git a/src/imgs/task_parallelism.jpg b/src/chapter5/imgs/task_parallelism.jpg
similarity index 100%
rename from src/imgs/task_parallelism.jpg
rename to src/chapter5/imgs/task_parallelism.jpg
diff --git a/src/imgs/time.png b/src/chapter5/imgs/time.png
similarity index 100%
rename from src/imgs/time.png
rename to src/chapter5/imgs/time.png
diff --git a/src/chapter5/openmpi.md b/src/chapter5/openmpi.md
index 54344e6..6e89875 100644
--- a/src/chapter5/openmpi.md
+++ b/src/chapter5/openmpi.md
@@ -22,7 +22,7 @@ int MPI_Comm_rank(MPI_Comm comm, int \* rank);
 // rank contains the value for that process- the function return value is an error code
 ```
 
-![MPI routines](../imgs/mpi_routines.png)
+![MPI routines](imgs/mpi_routines.png)
 
 ### Point-to-Point communication
 
@@ -242,7 +242,7 @@ mpirun -np 4 ./my-awesome-program
 There are some useful commands to check the parallelism of the code.
 The command top or htop looks into a process. As you can see from the image below, it shows the CPU usages
 
-![htop](../imgs/htop.png)
+![htop](imgs/htop.png)
 
 - The command ```time``` checks the overall performance of the code
 - By running this command, you get real time, user time and system time.
@@ -251,4 +251,4 @@
 - Real is wall clock time - time from start to finish of the call. This includes the time of overhead
 - Sys is the amount of CPU time spent in the kernel within the process.
 - User time +Sys time will tell you how much actual CPU time your process used.
-![time](../imgs/time.png)
+![time](imgs/time.png)
diff --git a/src/chapter5/parallel-refresher.md b/src/chapter5/parallel-refresher.md
index 80ad6b6..c4dbc3c 100644
--- a/src/chapter5/parallel-refresher.md
+++ b/src/chapter5/parallel-refresher.md
@@ -4,28 +4,28 @@
 We saw in the last chapter parallel computing can be used to solve problems by executing code in parallel as opposed to in series.
 
-![Task parallelism](../imgs/task_parallelism.jpg)
+![Task parallelism](imgs/task_parallelism.jpg)
 
 ## Data Parallelism
 
 Note that not all programs can be broken down into independent tasks and we might instead data parallelism like the following.
 
-![Data parallelism](../imgs/data_parallelism.jpg)
+![Data parallelism](imgs/data_parallelism.jpg)
 
 ## Parallel computing example
 
 Think back to the example below which was provided in the last chapter. We will look at the cost of memory transactions soon.
 
-![Parallel computing example](../imgs/parallel_computing_arrays_eg.png)
+![Parallel computing example](imgs/parallel_computing_arrays_eg.png)
 
 ## Parallel Scalability
 
 The speed up achieved from parallelism is dictated by your algorithm. Notably the serial bits of your algorithm can not be sped up by increasing the number of processors. The diagram below looks at the benefits we can achieve from writing parallel code as the number of processes increases.
 
-![Parallel scalability](../imgs/parallel_scalability.jpg)
+![Parallel scalability](imgs/parallel_scalability.jpg)
 
 ## Memory Architectures
 
 Lastly, the different memory architectures we looked at in the last section included shared memory, distributed memory and hybrid architectures. We have looked at shared memory in detail and now we will dive into distributed memory architecture.
 
-![Memory architectures](../imgs/memory_architectures.jpg)
+![Memory architectures](imgs/memory_architectures.jpg)
diff --git a/src/version.md b/src/version.md
index 72b3e42..8f1bfe8 100644
--- a/src/version.md
+++ b/src/version.md
@@ -1 +1 @@
-version: 0.1.0
\ No newline at end of file
+version: 1.0.0
\ No newline at end of file
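
Since the above is git format-patch output, it can be applied to a checkout of the book's repository with git am, which replays the commit with its author, date, and message intact. A minimal sketch, assuming the mail is saved as 0001-fix-image-paths.patch (a hypothetical filename) at the repository root:

  # Preview the diffstat and verify the patch applies cleanly, without committing anything.
  git apply --stat --check 0001-fix-image-paths.patch

  # Apply it as a proper commit, keeping the author/date/message metadata.
  git am 0001-fix-image-paths.patch

git am is used rather than git apply for the final step so that the image renames and the version bump land as a single recorded commit instead of unstaged working-tree changes.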