|
| 1 | +/* |
| 2 | + * Polytimos algorithm |
| 3 | + */ |
| 4 | +extern "C" |
| 5 | +{ |
| 6 | +#include "sph/sph_skein.h" |
| 7 | +#include "sph/sph_shabal.h" |
| 8 | +#include "sph/sph_echo.h" |
| 9 | +#include "sph/sph_luffa.h" |
| 10 | +#include "sph/sph_fugue.h" |
| 11 | +#include "sph/sph_streebog.h" |
| 12 | +} |
| 13 | + |
| 14 | +#include "miner.h" |
| 15 | + |
| 16 | +#include "cuda_helper.h" |
| 17 | +#include "x11/cuda_x11.h" |
| 18 | + |
// Per-GPU device buffers: a 64-byte (16 x uint32_t) hash state per cuda
// thread, and a 2-slot result buffer for nonces found by the final kernel.
static uint32_t *d_hash[MAX_GPUS];
static uint32_t *d_resNonce[MAX_GPUS];
| 21 | + |
| 22 | +extern void skein512_cpu_setBlock_80(void *pdata); |
| 23 | +extern void skein512_cpu_hash_80(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_hash, int swap); |
| 24 | +extern void x14_shabal512_cpu_init(int thr_id, uint32_t threads); |
| 25 | +extern void x14_shabal512_cpu_hash_64(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order); |
| 26 | +extern void x11_cubehash512_cpu_hash_64(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order); |
| 27 | +extern void x13_fugue512_cpu_init(int thr_id, uint32_t threads); |
| 28 | +extern void x13_fugue512_cpu_hash_64(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order); |
| 29 | +extern void x13_fugue512_cpu_free(int thr_id); |
| 30 | +extern void streebog_sm3_set_target(uint32_t* ptarget); |
| 31 | +extern void streebog_sm3_hash_64_final(int thr_id, uint32_t threads, uint32_t *d_hash, uint32_t* d_resNonce); |
| 32 | +extern void skunk_streebog_set_target(uint32_t* ptarget); |
| 33 | +extern void skunk_cuda_streebog(int thr_id, uint32_t threads, uint32_t *d_hash, uint32_t* d_resNonce); |
| 34 | + |
| 35 | +// CPU Hash |
// CPU reference implementation of the Polytimos chained hash:
// skein512 -> shabal512 -> echo512 -> luffa512 -> fugue512 -> streebog512.
// Hashes an 80-byte block header and copies the first 32 bytes of the
// final 64-byte digest into output.
extern "C" void polytimos_hash(void *output, const void *input)
{
	uint32_t _ALIGN(128) hashbuf[16];

	memset(hashbuf, 0, sizeof(hashbuf));

	sph_skein512_context c_skein;
	sph_skein512_init(&c_skein);
	sph_skein512(&c_skein, input, 80);
	sph_skein512_close(&c_skein, (void*) hashbuf);

	sph_shabal512_context c_shabal;
	sph_shabal512_init(&c_shabal);
	sph_shabal512(&c_shabal, hashbuf, 64);
	sph_shabal512_close(&c_shabal, hashbuf);

	sph_echo512_context c_echo;
	sph_echo512_init(&c_echo);
	sph_echo512(&c_echo, hashbuf, 64);
	sph_echo512_close(&c_echo, hashbuf);

	sph_luffa512_context c_luffa;
	sph_luffa512_init(&c_luffa);
	sph_luffa512(&c_luffa, hashbuf, 64);
	sph_luffa512_close(&c_luffa, hashbuf);

	sph_fugue512_context c_fugue;
	sph_fugue512_init(&c_fugue);
	sph_fugue512(&c_fugue, hashbuf, 64);
	sph_fugue512_close(&c_fugue, hashbuf);

	sph_gost512_context c_gost;
	sph_gost512_init(&c_gost);
	sph_gost512(&c_gost, (const void*) hashbuf, 64);
	sph_gost512_close(&c_gost, (void*) hashbuf);

	memcpy(output, hashbuf, 32);
}
| 74 | + |
// Per-GPU one-time initialization flag, and whether that device must fall
// back to the compat (sm3) streebog kernel.
static bool init[MAX_GPUS] = { 0 };
static bool use_compat_kernels[MAX_GPUS] = { 0 };
| 77 | + |
// Scan a nonce range on the GPU for the Polytimos algorithm.
// Runs the chained skein80 -> shabal -> echo -> luffa -> fugue kernels, then a
// final streebog kernel that also filters nonces against the target on-device,
// writing up to two candidate nonce offsets into d_resNonce. Candidates are
// re-hashed on the CPU (polytimos_hash) before being accepted.
// Returns the number of valid nonces stored in work->nonces, 0 when the range
// is exhausted or a restart was requested, or -1 on CUDA allocation failure.
extern "C" int scanhash_polytimos(int thr_id, struct work* work, uint32_t max_nonce, unsigned long *hashes_done)
{
	int dev_id = device_map[thr_id];
	uint32_t *pdata = work->data;
	uint32_t *ptarget = work->target;
	const uint32_t first_nonce = pdata[19];
	int intensity = (device_sm[dev_id] > 500 && !is_windows()) ? 20 : 19;
	uint32_t throughput = cuda_default_throughput(thr_id, 1 << intensity); // 19=256*256*8;
	//if (init[thr_id]) throughput = min(throughput, max_nonce - first_nonce);

	if (opt_benchmark)
		ptarget[7] = 0x000f; // relax the target so benchmark runs find shares

	if (!init[thr_id])
	{
		cudaSetDevice(dev_id);
		if (opt_cudaschedule == -1 && gpu_threads == 1) {
			cudaDeviceReset();
			// reduce cpu usage
			cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
			CUDA_LOG_ERROR();
		}
		gpulog(LOG_INFO, thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);

		// devices below SM 5.0 must use the compat (sm3) streebog kernel
		cuda_get_arch(thr_id);
		use_compat_kernels[thr_id] = (cuda_arch[dev_id] < 500);

		quark_skein512_cpu_init(thr_id, throughput);
		x14_shabal512_cpu_init(thr_id, throughput);
		x11_echo512_cpu_init(thr_id, throughput);
		x11_luffa512_cpu_init(thr_id, throughput);
		x13_fugue512_cpu_init(thr_id, throughput);

		// 64 bytes of hash state per cuda thread; both allocations return -1
		// on failure (was 0/-1 inconsistently, which could leave the caller
		// retrying a thread that can never allocate)
		CUDA_CALL_OR_RET_X(cudaMalloc(&d_hash[thr_id], 16 * sizeof(uint32_t) * throughput), -1);
		// two slots so a second candidate found in the same round is kept
		CUDA_CALL_OR_RET_X(cudaMalloc(&d_resNonce[thr_id], 2 * sizeof(uint32_t)), -1);

		init[thr_id] = true;
	}


	uint32_t _ALIGN(64) h_resNonce[2];
	uint32_t _ALIGN(64) endiandata[20];
	for (int k=0; k < 20; k++)
		be32enc(&endiandata[k], pdata[k]);


	cudaMemset(d_resNonce[thr_id], 0xff, 2*sizeof(uint32_t)); // UINT32_MAX == "no result"
	skein512_cpu_setBlock_80(endiandata);
	if (use_compat_kernels[thr_id]) {
		streebog_sm3_set_target(ptarget);
	} else {
		skunk_streebog_set_target(ptarget);
	}

	do {
		int order = 0;

		skein512_cpu_hash_80(thr_id, throughput, pdata[19], d_hash[thr_id], order++);
		x14_shabal512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++);
		x11_echo512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++);
		x11_luffa512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++);
		x13_fugue512_cpu_hash_64(thr_id, throughput, pdata[19], NULL, d_hash[thr_id], order++);
		// final round also compares against the target on the GPU and stores
		// matching nonce offsets in d_resNonce
		if (use_compat_kernels[thr_id]) {
			streebog_sm3_hash_64_final(thr_id, throughput, d_hash[thr_id], d_resNonce[thr_id]);
		} else {
			skunk_cuda_streebog(thr_id, throughput, d_hash[thr_id], d_resNonce[thr_id]);
		}

		*hashes_done = pdata[19] - first_nonce + throughput;

		// blocking copy; also synchronizes with the kernels launched above
		cudaMemcpy(h_resNonce, d_resNonce[thr_id], 2 * sizeof(uint32_t), cudaMemcpyDeviceToHost);
		CUDA_LOG_ERROR();

		if (h_resNonce[0] != UINT32_MAX)
		{
			const uint32_t Htarg = ptarget[7];
			const uint32_t startNounce = pdata[19];
			uint32_t _ALIGN(64) vhash[8];

			// validate the GPU candidate with the CPU implementation
			be32enc(&endiandata[19], startNounce + h_resNonce[0]);
			polytimos_hash(vhash, endiandata);
			if (vhash[7] <= ptarget[7] && fulltest(vhash, ptarget)) {
				work->valid_nonces = 1;
				work->nonces[0] = startNounce + h_resNonce[0];
				work_set_target_ratio(work, vhash);
				if (h_resNonce[1] != UINT32_MAX) {
					uint32_t secNonce = work->nonces[1] = startNounce + h_resNonce[1];
					be32enc(&endiandata[19], secNonce);
					polytimos_hash(vhash, endiandata);
					bn_set_target_ratio(work, vhash, 1);
					work->valid_nonces++;
					pdata[19] = max(work->nonces[0], work->nonces[1]) + 1;
				} else {
					pdata[19] = work->nonces[0] + 1; // cursor
				}
				return work->valid_nonces;
			}
			else if (vhash[7] > Htarg) {
				gpu_increment_reject(thr_id);
				if (!opt_quiet)
					// log the rejected nonce itself; work->nonces[0] is not
					// assigned on this path and would print a stale value
					gpulog(LOG_WARNING, thr_id, "result for %08x does not validate on CPU!", startNounce + h_resNonce[0]);
				cudaMemset(d_resNonce[thr_id], 0xff, 2*sizeof(uint32_t));
				pdata[19] = startNounce + h_resNonce[0] + 1;
				continue;
			}
		}

		// stop at the end of the assigned range (64-bit math avoids overflow)
		if ((uint64_t)throughput + pdata[19] >= max_nonce) {
			pdata[19] = max_nonce;
			break;
		}
		pdata[19] += throughput;

	} while (!work_restart[thr_id].restart);

	*hashes_done = pdata[19] - first_nonce;

	CUDA_LOG_ERROR();

	return 0;
}
| 199 | + |
| 200 | +// cleanup |
// Release the per-thread GPU resources allocated by scanhash_polytimos().
// Safe to call when the thread was never initialized.
extern "C" void free_polytimos(int thr_id)
{
	if (!init[thr_id])
		return;

	// wait for in-flight kernels before freeing their buffers
	// (cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
	// supported equivalent)
	cudaDeviceSynchronize();

	cudaFree(d_hash[thr_id]);
	x13_fugue512_cpu_free(thr_id);
	cudaFree(d_resNonce[thr_id]);

	CUDA_LOG_ERROR();

	cudaDeviceSynchronize();
	init[thr_id] = false;
}
0 commit comments