Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

ORCA asynchronous sampling #2205

Merged
merged 30 commits into from
Oct 8, 2024
Merged
Show file tree
Hide file tree
Changes from 11 commits
Commits
Show all changes
30 commits
Select commit Hold shift + click to select a range
738dc4b
Refactor orca qpu
Omar-ORCA Aug 22, 2024
ee1518d
Refactor
Omar-ORCA Aug 27, 2024
895cba1
Refactor
Omar-ORCA Aug 28, 2024
ae09f9d
Merge branch 'NVIDIA:main' into async_sampling
Omar-ORCA Aug 28, 2024
e6cf082
Python bindings
Omar-ORCA Aug 28, 2024
bb815a2
Add bearer token
Omar-ORCA Sep 11, 2024
8c7c90d
Merge branch 'main' into async_sampling
Omar-ORCA Sep 11, 2024
a6b149c
Merge branch 'NVIDIA:main' into async_sampling
Omar-ORCA Sep 13, 2024
f340222
Wait for samples
Omar-ORCA Sep 13, 2024
e83928b
* Code formatting
khalatepradnya Sep 16, 2024
721162b
* Fix documentation errors
khalatepradnya Sep 18, 2024
9377997
* Addressing review comments
khalatepradnya Sep 19, 2024
66f409f
Merge branch 'main' into async_sampling
Omar-ORCA Sep 20, 2024
4c364c0
Merge branch 'NVIDIA:main' into async_sampling
Omar-ORCA Sep 24, 2024
ef06e36
Add `qpu_id` parameter to sample and sample_async
Omar-ORCA Sep 27, 2024
5ad8013
Merge branch 'main' into async_sampling
Omar-ORCA Sep 27, 2024
669754a
Throw exception instead of returning a dummy payload
Omar-ORCA Sep 27, 2024
339d7d8
Typo in Executor.cpp
Omar-ORCA Sep 27, 2024
58b209b
Merge branch 'main' into async_sampling
khalatepradnya Sep 30, 2024
e336abd
* C++ source code formatting
khalatepradnya Sep 30, 2024
96679b4
* Doxygen fixes
khalatepradnya Sep 30, 2024
efca875
* Missing doxygen for the newly added asynchronous sampling.
khalatepradnya Sep 30, 2024
06b888c
Merge branch 'main' into async_sampling
khalatepradnya Oct 1, 2024
4e199f9
* Doxygen fix for the async sample result
khalatepradnya Oct 1, 2024
a385d6c
Merge branch 'main' into async_sampling
khalatepradnya Oct 3, 2024
9b979f2
* Addressing review comment
khalatepradnya Oct 4, 2024
229a8f9
* Spell check
khalatepradnya Oct 4, 2024
8d58d11
Merge branch 'main' into async_sampling
khalatepradnya Oct 7, 2024
b8b2de1
* Restoring file
khalatepradnya Oct 7, 2024
9526c3e
Merge branch 'main' into async_sampling
khalatepradnya Oct 7, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion docs/sphinx/api/languages/cpp_api.rst
Original file line number Diff line number Diff line change
Expand Up @@ -166,7 +166,9 @@ Platform

.. doxygenclass:: cudaq::BaseRemoteSimulatorQPU

.. doxygenclass:: cudaq::BaseNvcfSimulatorQPU
.. doxygenclass:: cudaq::BaseNvcfSimulatorQPU

.. doxygenclass:: cudaq::OrcaRemoteRESTQPU

.. doxygenclass:: cudaq::quantum_platform
:members:
Expand Down
39 changes: 35 additions & 4 deletions docs/sphinx/examples/cpp/providers/orca.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,12 @@
#include "cudaq/orca.h"
#include "cudaq.h"

#include <fstream>
#include <iostream>

#include <chrono>
#include <thread>

// define helper function to generate linear spaced vectors
template <typename T>
void linear_spaced_vector(std::vector<T> &xs, T min, T max, std::size_t N) {
Expand All @@ -20,6 +26,8 @@ void linear_spaced_vector(std::vector<T> &xs, T min, T max, std::size_t N) {
}

int main() {
using namespace std::this_thread; // sleep_for, sleep_until
using namespace std::chrono_literals; // `ns`, `us`, `ms`, `s`, `h`, etc.

// A time-bin boson sampling experiment: An input state of 4 indistinguishable
// photons mixed with 4 vacuum states across 8 time bins (modes) enter the
Expand Down Expand Up @@ -60,11 +68,15 @@ int main() {
// we can also set number of requested samples
int n_samples{10000};

// Submit to ORCA synchronously (e.g., wait for the job result to be returned
// before proceeding with the rest of the execution).
// Submit to ORCA synchronously (e.g., wait for the job result to be
// returned before proceeding with the rest of the execution).
std::cout << "Submitting to ORCA Server synchronously" << std::endl;
auto counts =
cudaq::orca::sample(input_state, loop_lengths, bs_angles, n_samples);

// Print the results
counts.dump();

// If the system includes phase shifters, the phase shifter angles can be
// included in the call

Expand All @@ -73,8 +85,27 @@ int main() {
// ps_angles, n_samples);
// ```

// Print the results
counts.dump();
// Alternatively we can submit to ORCA asynchronously (e.g., continue
// executing code in the file until the job has been returned).
std::cout << "Submitting to ORCA Server asynchronously" << std::endl;
auto async_results = cudaq::orca::sample_async(input_state, loop_lengths,
bs_angles, n_samples);

// Can write the future to file:
{
std::ofstream out("saveMe.json");
out << async_results;
}

// Then come back and read it in later.
cudaq::async_result<cudaq::sample_result> readIn;
std::ifstream in("saveMe.json");
in >> readIn;

sleep_for(200ms); // wait for the job to be processed
khalatepradnya marked this conversation as resolved.
Show resolved Hide resolved
// Get the results of the read in future.
auto async_counts = readIn.get();
async_counts.dump();

return 0;
}
32 changes: 32 additions & 0 deletions docs/sphinx/examples/python/providers/orca.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import cudaq
import time

import numpy as np
import os
Expand Down Expand Up @@ -45,9 +46,11 @@
# we can also set number of requested samples
n_samples = 10000

# Option A:
# By using the synchronous `cudaq.orca.sample`, the execution of
# any remaining classical code in the file will occur only
# after the job has been returned from ORCA Server.
print("Submitting to ORCA Server synchronously")
counts = cudaq.orca.sample(input_state, loop_lengths, bs_angles, n_samples)

# If the system includes phase shifters, the phase shifter angles can be
Expand All @@ -59,3 +62,32 @@

# Print the results
print(counts)

# Option B:
# By using the asynchronous `cudaq.orca.sample_async`, the remaining
# classical code will be executed while the job is being handled
# by ORCA. This is ideal when submitting via a queue over
# the cloud.
print("Submitting to ORCA Server asynchronously")
async_results = cudaq.orca.sample_async(input_state, loop_lengths, bs_angles,
                                        n_samples)
# ... more classical code to run ...

# We can either retrieve the results later in the program with
# ```
# async_counts = async_results.get()
# ```
# or we can also write the job reference (`async_results`) to
# a file and load it later or from a different process.
# Use a context manager so the handle is closed even on error.
with open("future.txt", "w") as file:
    file.write(str(async_results))

# We can later read the file content and retrieve the job
# information and results.
time.sleep(0.2)  # wait for the job to be processed
with open("future.txt", "r") as same_file:
    retrieved_async_results = cudaq.AsyncSampleResult(str(same_file.read()))

counts = retrieved_async_results.get()
print(counts)
10 changes: 10 additions & 0 deletions docs/sphinx/using/backends/hardware.rst
Original file line number Diff line number Diff line change
Expand Up @@ -312,6 +312,16 @@ configuration.

export ORCA_ACCESS_URL="https://<ORCA API Server>"


Sometimes the requests to the PT-1 require an authentication token. This token can be set as an
environment variable named ``ORCA_AUTH_TOKEN``. For example, if the token is :code:`AbCdEf123456`,
you can set the environment variable as follows:

.. code:: bash

export ORCA_AUTH_TOKEN="AbCdEf123456"


Submission from C++
`````````````````````````

Expand Down
2 changes: 2 additions & 0 deletions python/extension/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,8 @@ declare_mlir_python_extension(CUDAQuantumPythonSources.Extension
../../runtime/cudaq/platform/common/QuantumExecutionQueue.cpp
../../runtime/cudaq/platform/default/rest_server/RemoteRuntimeClient.cpp
../../runtime/cudaq/platform/orca/OrcaQPU.cpp
../../runtime/cudaq/platform/orca/OrcaRemoteRESTQPU.cpp
../../runtime/cudaq/platform/orca/OrcaServerHelper.cpp
../../runtime/common/ArgumentConversion.cpp

EMBED_CAPI_LINK_LIBS
Expand Down
18 changes: 17 additions & 1 deletion python/extension/CUDAQuantumExtension.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -178,6 +178,23 @@ PYBIND11_MODULE(_quakeDialects, m) {
"ORCA's backends",
py::arg("input_state"), py::arg("loop_lengths"), py::arg("bs_angles"),
py::arg("n_samples") = 10000);
orcaSubmodule.def(
"sample_async",
py::overload_cast<std::vector<std::size_t> &, std::vector<std::size_t> &,
std::vector<double> &, std::vector<double> &, int>(
&cudaq::orca::sample_async),
"Performs Time Bin Interferometer (TBI) boson sampling experiments on "
"ORCA's backends",
py::arg("input_state"), py::arg("loop_lengths"), py::arg("bs_angles"),
py::arg("ps_angles") = nullptr, py::arg("n_samples") = 10000);
Omar-ORCA marked this conversation as resolved.
Show resolved Hide resolved
orcaSubmodule.def(
"sample_async",
py::overload_cast<std::vector<std::size_t> &, std::vector<std::size_t> &,
std::vector<double> &, int>(&cudaq::orca::sample_async),
"Performs Time Bin Interferometer (TBI) boson sampling experiments on "
"ORCA's backends",
py::arg("input_state"), py::arg("loop_lengths"), py::arg("bs_angles"),
py::arg("n_samples") = 10000);

auto photonicsSubmodule = cudaqRuntime.def_submodule("photonics");
photonicsSubmodule.def(
Expand Down Expand Up @@ -215,7 +232,6 @@ PYBIND11_MODULE(_quakeDialects, m) {
cudaq::getExecutionManager()->returnQudit(cudaq::QuditInfo(level, id));
},
"Release a qudit of given id.", py::arg("level"), py::arg("id"));

cudaqRuntime.def("cloneModule",
[](MlirModule mod) { return wrap(unwrap(mod).clone()); });
cudaqRuntime.def("isTerminator", [](MlirOperation op) {
Expand Down
38 changes: 37 additions & 1 deletion runtime/common/Executor.cpp
khalatepradnya marked this conversation as resolved.
Show resolved Hide resolved
Original file line number Diff line number Diff line change
Expand Up @@ -55,4 +55,40 @@ Executor::execute(std::vector<KernelExecution> &codesToExecute) {
std::string name = serverHelper->name();
return details::future(ids, name, config);
}
} // namespace cudaq

/// Submit an ORCA TBI job described by \p params and return a future that
/// resolves to the sampling results. The caller can make this synchronous by
/// immediately calling .get() on the returned future.
details::future Executor::execute(cudaq::orca::TBIParameters params,
                                  const std::string &kernelName) {

  serverHelper->setShots(shots);

  cudaq::info("Executor creating job to execute with the {} helper.",
              serverHelper->name());

  // Create the Job Payload, composed of job post path, headers,
  // and the job json messages themselves
  auto [jobPostPath, headers, jobs] = serverHelper->createJob(params);
  auto job = jobs[0];
  auto config = serverHelper->getConfig();

  std::vector<cudaq::details::future::Job> ids;
  cudaq::info("Job created, posting to {}", jobPostPath);

  // Post it, get the response
  auto response = client.post(jobPostPath, "", job, headers);
  cudaq::info("Job posted, response was {}", response.dump());

  // Add the job id and the job name.
  auto job_id = serverHelper->extractJobId(response);
  if (job_id.empty()) {
    // The server response carried no id; fall back to the id embedded in the
    // request payload and rebuild the polling path from it.
    nlohmann::json tmp(job.at("job_id"));
    serverHelper->constructGetJobPath(tmp[0]);
    job_id = tmp[0].at("job_id");
  }
  ids.emplace_back(job_id, kernelName);
  config["output_names." + job_id] = kernelName;

  config.insert({"shots", std::to_string(shots)});
  std::string name = serverHelper->name();
  return cudaq::details::future(ids, name, config);
}
} // namespace cudaq
6 changes: 6 additions & 0 deletions runtime/common/Executor.h
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,12 @@ class Executor : public registry::RegisteredType<Executor> {
/// @brief Execute the provided quantum codes and return a future object
/// The caller can make this synchronous by just immediately calling .get().
details::future execute(std::vector<KernelExecution> &codesToExecute);

  /// @brief Execute the provided ORCA quantum parameters and return a future
  /// object. The caller can make this synchronous by just immediately calling
  /// .get().
details::future execute(cudaq::orca::TBIParameters params,
const std::string &kernelName);
};

} // namespace cudaq
14 changes: 14 additions & 0 deletions runtime/common/ServerHelper.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
#include "Future.h"
#include "MeasureCounts.h"
#include "Registry.h"
#include "cudaq/orca.h"
#include <filesystem>
#include <stdexcept>

namespace cudaq {
Expand Down Expand Up @@ -103,6 +104,19 @@ class ServerHelper : public registry::RegisteredType<ServerHelper> {
virtual ServerJobPayload
createJob(std::vector<KernelExecution> &circuitCodes) = 0;

/// @brief Create a job payload for the provided TBI parameters
virtual ServerJobPayload createJob(cudaq::orca::TBIParameters params) {
std::vector<ServerMessage> jobs;
ServerMessage job;
jobs.push_back(job);

std::map<std::string, std::string> headers;

// Return a tuple containing the job path, headers, and the job message
auto ret = std::make_tuple("", headers, jobs);
return ret;
khalatepradnya marked this conversation as resolved.
Show resolved Hide resolved
Omar-ORCA marked this conversation as resolved.
Show resolved Hide resolved
};

/// @brief Extract the job id from the server response from posting the job.
virtual std::string extractJobId(ServerMessage &postResponse) = 0;

Expand Down
7 changes: 6 additions & 1 deletion runtime/cudaq/platform/orca/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,13 @@

set(LIBRARY_NAME cudaq-orca-qpu)
message(STATUS "Building ORCA REST QPU.")
set(ORCA_SRC
OrcaServerHelper.cpp
OrcaRemoteRESTQPU.cpp
OrcaQPU.cpp
)

add_library(${LIBRARY_NAME} SHARED OrcaQPU.cpp)
add_library(${LIBRARY_NAME} SHARED ${ORCA_SRC})

target_include_directories(${LIBRARY_NAME} PRIVATE .
PUBLIC
Expand Down
Loading
Loading