
trim logs
luoxiaojian committed Sep 18, 2024
1 parent 74ea0ac commit 260ae20
Showing 4 changed files with 12 additions and 50 deletions.
5 changes: 0 additions & 5 deletions examples/analytical_apps/bc/bc_context.h
@@ -56,11 +56,6 @@ class BCContext : public VertexDataContext<FRAG_T, float> {
       LOG(INFO) << "[frag-" << frag.fid()
                 << "] BC(0) = " << centrality_value[s];
     }
-#ifdef PROFILING
-    VLOG(2) << "preprocess_time: " << preprocess_time << "s.";
-    VLOG(2) << "exec_time: " << exec_time << "s.";
-    VLOG(2) << "postprocess_time: " << postprocess_time << "s.";
-#endif
   }

   oid_t source_id;
5 changes: 0 additions & 5 deletions examples/analytical_apps/bfs/bfs_opt_context.h
@@ -49,11 +49,6 @@ class BFSOptContext : public VertexDataContext<FRAG_T, int64_t> {
     for (auto v : inner_vertices) {
       os << frag.GetId(v) << " " << partial_result[v] << std::endl;
     }
-#ifdef PROFILING
-    VLOG(2) << "preprocess_time: " << preprocess_time << "s.";
-    VLOG(2) << "exec_time: " << exec_time << "s.";
-    VLOG(2) << "postprocess_time: " << postprocess_time << "s.";
-#endif
   }

   oid_t source_id;
28 changes: 0 additions & 28 deletions examples/analytical_apps/sssp/sssp_opt.h
@@ -56,10 +56,6 @@ class SSSPOpt : public ParallelAppBase<FRAG_T, SSSPOptContext<FRAG_T>,
     vertex_t source;
     bool native_source = frag.GetInnerVertex(ctx.source_id, source);

-#ifdef PROFILING
-    ctx.exec_time -= GetCurrentTime();
-#endif

     ctx.next_modified.ParallelClear(GetThreadPool());

     // Get the channel. Messages assigned to this channel will be sent by the
@@ -83,17 +79,9 @@ class SSSPOpt : public ParallelAppBase<FRAG_T, SSSPOptContext<FRAG_T>,
       }
     }

-#ifdef PROFILING
-    ctx.exec_time += GetCurrentTime();
-    ctx.postprocess_time -= GetCurrentTime();
-#endif

     messages.ForceContinue();

     ctx.next_modified.Swap(ctx.curr_modified);
-#ifdef PROFILING
-    ctx.postprocess_time += GetCurrentTime();
-#endif
   }

   /**
@@ -109,10 +97,6 @@ class SSSPOpt : public ParallelAppBase<FRAG_T, SSSPOptContext<FRAG_T>,

     auto& channels = messages.Channels();

-#ifdef PROFILING
-    ctx.preprocess_time -= GetCurrentTime();
-#endif

     ctx.next_modified.ParallelClear(GetThreadPool());

     // parallel process and reduce the received messages
@@ -124,11 +108,6 @@ class SSSPOpt : public ParallelAppBase<FRAG_T, SSSPOptContext<FRAG_T>,
           }
         });

-#ifdef PROFILING
-    ctx.preprocess_time += GetCurrentTime();
-    ctx.exec_time -= GetCurrentTime();
-#endif

     // incremental evaluation.
     ForEach(ctx.curr_modified, inner_vertices,
             [&frag, &ctx](int tid, vertex_t v) {
@@ -146,10 +125,6 @@ class SSSPOpt : public ParallelAppBase<FRAG_T, SSSPOptContext<FRAG_T>,

     // put messages into channels corresponding to the destination fragments.

-#ifdef PROFILING
-    ctx.exec_time += GetCurrentTime();
-    ctx.postprocess_time -= GetCurrentTime();
-#endif
     auto outer_vertices = frag.OuterVertices();
     ForEach(ctx.next_modified, outer_vertices,
             [&channels, &frag, &ctx](int tid, vertex_t v) {
@@ -164,9 +139,6 @@ class SSSPOpt : public ParallelAppBase<FRAG_T, SSSPOptContext<FRAG_T>,
     }

     ctx.next_modified.Swap(ctx.curr_modified);
-#ifdef PROFILING
-    ctx.postprocess_time += GetCurrentTime();
-#endif
   }

   void EstimateMessageSize(const fragment_t& frag, size_t& send_size,
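The blocks removed from the three example apps above all use the same sign-flip timing idiom: subtracting the wall clock from an accumulator before a phase and adding it back afterwards adds exactly the elapsed time of that phase. Below is a minimal self-contained sketch of that idiom; the CurrentTimeSeconds helper is a stand-in assumption, whereas the original code uses grape's GetCurrentTime() and the context fields preprocess_time, exec_time, and postprocess_time.

// Sketch of the timer idiom used by the removed PROFILING blocks:
// exec_time -= t_start; ...; exec_time += t_end  adds (t_end - t_start).
#include <chrono>
#include <iostream>

// Stand-in for grape's GetCurrentTime() (assumption, not the library call).
static double CurrentTimeSeconds() {
  using namespace std::chrono;
  return duration<double>(steady_clock::now().time_since_epoch()).count();
}

int main() {
  double exec_time = 0.0;

  exec_time -= CurrentTimeSeconds();  // phase starts: subtract the start time
  volatile double sink = 0.0;
  for (int i = 0; i < 1000000; ++i) {
    sink += i * 1e-9;                 // placeholder for the real work
  }
  exec_time += CurrentTimeSeconds();  // phase ends: add the end time

  std::cout << "exec_time: " << exec_time << "s." << std::endl;
  return 0;
}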
24 changes: 12 additions & 12 deletions grape/communication/sync_comm.h
@@ -95,8 +95,8 @@ static inline void send_buffer(const T* ptr, size_t len, int dst_worker_id,
   const size_t chunk_size_in_bytes = chunk_num * sizeof(T);
   int iter = len / chunk_num;
   size_t remaining = (len % chunk_num) * sizeof(T);
-  LOG(INFO) << "sending large buffer in " << iter + (remaining != 0)
-            << " iterations";
+  VLOG(10) << "sending large buffer in " << iter + (remaining != 0)
+           << " iterations";
   for (int i = 0; i < iter; ++i) {
     MPI_Send(ptr, chunk_size_in_bytes, MPI_CHAR, dst_worker_id, tag, comm);
     ptr += chunk_num;
@@ -120,8 +120,8 @@ static inline void isend_buffer(const T* ptr, size_t len, int dst_worker_id,
   const size_t chunk_size_in_bytes = chunk_num * sizeof(T);
   int iter = len / chunk_num;
   size_t remaining = (len % chunk_num) * sizeof(T);
-  LOG(INFO) << "isending large buffer in " << iter + (remaining != 0)
-            << " iterations";
+  VLOG(10) << "isending large buffer in " << iter + (remaining != 0)
+           << " iterations";
   for (int i = 0; i < iter; ++i) {
     MPI_Request req;
     MPI_Isend(ptr, chunk_size_in_bytes, MPI_CHAR, dst_worker_id, tag, comm,
@@ -147,8 +147,8 @@ static inline void recv_buffer(T* ptr, size_t len, int src_worker_id, int tag,
   const size_t chunk_size_in_bytes = chunk_num * sizeof(T);
   int iter = len / chunk_num;
   size_t remaining = (len % chunk_num) * sizeof(T);
-  LOG(INFO) << "recving large buffer in " << iter + (remaining != 0)
-            << " iterations";
+  VLOG(10) << "recving large buffer in " << iter + (remaining != 0)
+           << " iterations";
   for (int i = 0; i < iter; ++i) {
     MPI_Recv(ptr, chunk_size_in_bytes, MPI_CHAR, src_worker_id, tag, comm,
              MPI_STATUS_IGNORE);
@@ -177,8 +177,8 @@ static inline void irecv_buffer(T* ptr, size_t len, int src_worker_id, int tag,
   const size_t chunk_size_in_bytes = chunk_num * sizeof(T);
   int iter = len / chunk_num;
   size_t remaining = (len % chunk_num) * sizeof(T);
-  LOG(INFO) << "irecving large buffer in " << iter + (remaining != 0)
-            << " iterations";
+  VLOG(10) << "irecving large buffer in " << iter + (remaining != 0)
+           << " iterations";
   for (int i = 0; i < iter; ++i) {
     MPI_Irecv(ptr, chunk_size_in_bytes, MPI_CHAR, src_worker_id, tag, comm,
               &reqs[i]);
@@ -202,8 +202,8 @@ static inline void irecv_buffer(T* ptr, size_t len, int src_worker_id, int tag,
   const size_t chunk_size_in_bytes = chunk_num * sizeof(T);
   int iter = len / chunk_num;
   size_t remaining = (len % chunk_num) * sizeof(T);
-  LOG(INFO) << "irecving large buffer in " << iter + (remaining != 0)
-            << " iterations";
+  VLOG(10) << "irecving large buffer in " << iter + (remaining != 0)
+           << " iterations";
   for (int i = 0; i < iter; ++i) {
     MPI_Request req;
     MPI_Irecv(ptr, chunk_size_in_bytes, MPI_CHAR, src_worker_id, tag, comm,
@@ -236,8 +236,8 @@ static inline void bcast_buffer(T* ptr, size_t len, int root, MPI_Comm comm) {
   const size_t chunk_size_in_bytes = chunk_num * sizeof(T);
   int iter = len / chunk_num;
   size_t remaining = (len % chunk_num) * sizeof(T);
-  LOG(INFO) << "bcast large buffer in " << iter + (remaining != 0)
-            << " iterations";
+  VLOG(10) << "bcast large buffer in " << iter + (remaining != 0)
+           << " iterations";
   for (int i = 0; i < iter; ++i) {
     MPI_Bcast(ptr, chunk_size_in_bytes, MPI_CHAR, root, comm);
     ptr += chunk_num;
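The LOG(INFO) to VLOG(10) switch above moves the chunked-transfer messages behind glog's verbose-logging gate, so they disappear at the default verbosity and only appear when explicitly requested. A minimal sketch of that behaviour, assuming glog as the logging backend (where these macros come from); the message text is illustrative.

#include <glog/logging.h>

int main(int argc, char* argv[]) {
  google::InitGoogleLogging(argv[0]);
  FLAGS_logtostderr = true;  // print to stderr instead of log files

  // At the default verbosity (FLAGS_v == 0) this line produces no output.
  VLOG(10) << "sending large buffer in 3 iterations";

  // Raising the verbosity to 10 (equivalent to running with --v=10 where
  // gflags parses the command line) makes VLOG(10) messages visible.
  FLAGS_v = 10;
  VLOG(10) << "sending large buffer in 3 iterations";

  return 0;
}

In other words, the commit does not delete the information; it only requires opting in with a verbosity level of at least 10 to see it.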
