Running on a CUDA GPU: predicting the behavior of secp256k1 elliptic curve properties

18.03.2025

The following scripts predict the behavior of secp256k1 elliptic curve properties and measure how far the resulting sequence deviates from random, using a CUDA GPU to accelerate the work. They combine data generation, neural network training, and GPU-accelerated computation.

1. Data generation

Generating elliptic curve points and random data using the OpenSSL library:

cpp

#include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <openssl/bn.h>
#include <openssl/ec.h>
#include <openssl/err.h>

using namespace std;

int main() {
    int bits = 256;
    unsigned char buf[32];
    const char *pr;

    EC_GROUP *group;
    EC_POINT *P;

    BN_CTX *ctx = BN_CTX_new();
    BIGNUM *x = BN_new();
    BIGNUM *n = BN_new();
    BIGNUM *y = BN_new();

    FILE *xFile = fopen("x600_btc_32_LH.bin", "wb");
    FILE *yFile = fopen("y600_btc_32_LH.bin", "wb");
    FILE *rFile = fopen("PM_rand_600_t.bin", "rb");

    if (rFile == NULL) {
        cout << "PM_rand_600_t.bin NOT FOUND" << endl;
        return -1;
    }

    srand(time(NULL));
    int nid = 714; // NID_secp256k1, the secp256k1 curve
    group = EC_GROUP_new_by_curve_name(nid);
    EC_GROUP_precompute_mult(group, ctx); // speed up repeated scalar multiplications

    P = EC_POINT_new(group);
    BN_rand(n, bits, BN_RAND_TOP_ONE, BN_RAND_BOTTOM_ANY); // random starting private key

    int NN = 60000; // number of samples

    for (int i = 0; i < NN; i++) {
        if ((rand() % 128) < 16) { // ~1/8 of the samples are curve points
            pr = "1";
            EC_POINT_mul(group, P, n, NULL, NULL, ctx); // P = n*G
            EC_POINT_get_affine_coordinates_GFp(group, P, x, y, ctx);
            BN_bn2binpad(y, buf, 32); // y coordinate, padded to 32 bytes
        } else {
            pr = "0";
            fread(buf, 32, 1, rFile); // read 32 random bytes
        }
        fwrite(buf, 32, 1, xFile); // feature: 32-byte string
        fwrite(pr, 1, 1, yFile);   // label: ASCII '0' or '1'
        BN_add_word(n, 1L);        // increment the private key
    }

    fclose(xFile);
    fclose(yFile);
    fclose(rFile);

    EC_POINT_free(P);
    BN_free(x);
    BN_free(y);
    BN_free(n);
    BN_CTX_free(ctx);
    EC_GROUP_free(group);
    return 0;
}
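
The generator above reads pre-generated random data from PM_rand_600_t.bin to fill the "random" class. A minimal sketch for producing that file, assuming one 32-byte block per sample and 60,000 samples (matching NN in the C++ code) and using the operating system's CSPRNG via os.urandom:

python

import os

# Assumption: 60000 blocks of 32 random bytes each, matching the C++ generator above
NUM_SAMPLES = 60000
BLOCK_SIZE = 32

with open("PM_rand_600_t.bin", "wb") as f:
    f.write(os.urandom(NUM_SAMPLES * BLOCK_SIZE))  # cryptographically strong random bytes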

2. Preparing data for the neural network

Converting data into a format for training a neural network:

python

import numpy as np
from keras.utils import to_categorical

num_classes = 2
length = 32             # bytes per sample
length_8 = length << 3  # 256 bits per sample
num_train = 50000
num_test = 10000

X_train = np.zeros((num_train, length_8), dtype='uint8')
X_test = np.zeros((num_test, length_8), dtype='uint8')
y_train = np.zeros((num_train), dtype='uint8')
y_test = np.zeros((num_test), dtype='uint8')

f_x = open("./x600_btc_32_LH.bin", 'rb')
f_y = open("./y600_btc_32_LH.bin", 'rb')

# Each 32-byte sample is unpacked into 256 individual bits
for k in range(num_train):
    X_train[k] = np.unpackbits(np.frombuffer(f_x.read(length), dtype='uint8'))
for k in range(num_test):
    X_test[k] = np.unpackbits(np.frombuffer(f_x.read(length), dtype='uint8'))

# Labels are stored as ASCII characters '0' / '1'
for i in range(num_train):
    y_train[i] = ord(f_y.read(1))
for i in range(num_test):
    y_test[i] = ord(f_y.read(1))

f_x.close()
f_y.close()

X_train = X_train.astype('float32')  # bit features are already 0/1
X_test = X_test.astype('float32')
Y_train = to_categorical(y_train - 48, num_classes)  # ASCII code minus 48 -> class 0/1
Y_test = to_categorical(y_test - 48, num_classes)
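
A quick sanity check before training: since roughly one sample in eight is a curve point, the positive class should make up about 12-13% of the labels. Using the arrays defined above:

python

print(X_train.shape, Y_train.shape)                       # expected: (50000, 256) (50000, 2)
print("share of curve points:", (y_train - 48).mean())    # expected: around 0.125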

3. Neural network with custom activation

Creating a bidirectional GRU network with a custom activation function:

python

from keras.models import Model
from keras.layers import Dense, Input, Bidirectional, GRU
from keras.optimizers import RMSprop
from keras import backend as K
import math

# Gaussian-shaped custom activation, a bell curve centered at mu
def gaussian(x):
    mu = 64.
    sigma = 10.
    xx = -0.5 * ((x - mu) / sigma) ** 2 / sigma / math.sqrt(2 * math.pi)
    return K.exp(xx)

batch_size = 32
hidden_size_1 = 1024
hidden_size_2 = 1024

# Each sample is treated as a 16x16 sequence of bits (256 bits in total)
inp = Input(shape=(16, 16,))
x = Bidirectional(GRU(hidden_size_1, return_sequences=True))(inp)
x = Bidirectional(GRU(hidden_size_2))(x)
x = Dense(hidden_size_1, activation='sigmoid')(x)
x = Dense(hidden_size_2, activation=gaussian)(x)
out = Dense(num_classes, activation='softmax')(x)

model = Model(inputs=inp, outputs=out)
model.compile(loss='binary_crossentropy',
              optimizer=RMSprop(learning_rate=0.0001),
              metrics=['accuracy'])

model.fit(X_train.reshape(num_train, 16, 16), Y_train,
          batch_size=batch_size,
          epochs=16,
          validation_data=(X_test.reshape(num_test, 16, 16), Y_test))
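
After training, the gap between test accuracy and the majority-class baseline shows how distinguishable the curve points are from random data: with roughly 1/8 of samples being curve points, always predicting "random" already yields about 87-88% accuracy, so only results noticeably above that baseline suggest a detectable deviation from randomness. A minimal check, reusing the arrays defined above:

python

# Evaluate on the held-out test set
loss, acc = model.evaluate(X_test.reshape(num_test, 16, 16), Y_test, batch_size=batch_size)

baseline = 1.0 - (y_test - 48).mean()  # accuracy of always predicting "random"
print("test accuracy:", acc)
print("majority-class baseline:", baseline)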

4. Using CUDA

To accelerate the computations on the GPU, install TensorFlow with CUDA support and make sure you have a compatible GPU, with the NVIDIA driver and the cuDNN library installed correctly.

bash

# Install TensorFlow with GPU support:
pip install tensorflow-gpu==2.x.x
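
Recent TensorFlow 2 releases bundle GPU support in the plain tensorflow package, so tensorflow-gpu is only needed for older versions. To verify that TensorFlow actually sees the GPU before starting training:

python

import tensorflow as tf

# Lists CUDA-capable devices visible to TensorFlow; an empty list means CPU-only execution
print(tf.config.list_physical_devices('GPU'))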

This approach makes it possible to efficiently generate data and train a neural network to analyze sequences of secp256k1 curve points.