Fully annotated reference manual - version 1.8.12
basiccpuenvironment.cpp
/*
 Copyright (C) 2023 Quaternion Risk Management Ltd
 All rights reserved.

 This file is part of ORE, a free-software/open-source library
 for transparent pricing and risk analysis - http://opensourcerisk.org

 ORE is free software: you can redistribute it and/or modify it
 under the terms of the Modified BSD License. You should have received a
 copy of the license along with this program.
 The license is also available online at <http://opensourcerisk.org>

 This program is distributed on the basis that it will form a useful
 contribution to risk analytics and model standardisation, but WITHOUT
 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 FITNESS FOR A PARTICULAR PURPOSE. See the license for more details.
*/

#include <qle/math/basiccpuenvironment.hpp>
#include <qle/math/randomvariable_ops.hpp>

#include <ql/errors.hpp>
#include <ql/math/distributions/normaldistribution.hpp>
#include <ql/math/randomnumbers/mt19937uniformrng.hpp>
#include <ql/models/marketmodels/browniangenerators/mtbrowniangenerator.hpp>

#include <boost/algorithm/string/join.hpp>
#include <boost/timer/timer.hpp>

namespace QuantExt {

class BasicCpuContext : public ComputeContext {
public:
    BasicCpuContext();
    ~BasicCpuContext() override final;
    void init() override final;

    std::pair<std::size_t, bool> initiateCalculation(const std::size_t n, const std::size_t id = 0,
                                                     const std::size_t version = 0,
                                                     const Settings settings = {}) override final;
    void disposeCalculation(const std::size_t id) override final;
    std::size_t createInputVariable(double v) override final;
    std::size_t createInputVariable(double* v) override final;
    std::vector<std::vector<std::size_t>> createInputVariates(const std::size_t dim,
                                                              const std::size_t steps) override final;
    std::size_t applyOperation(const std::size_t randomVariableOpCode,
                               const std::vector<std::size_t>& args) override final;
    void freeVariable(const std::size_t id) override final;
    void declareOutputVariable(const std::size_t id) override final;
    void finalizeCalculation(std::vector<double*>& output) override final;

    bool supportsDoublePrecision() const override { return true; }

    const DebugInfo& debugInfo() const override final;

private:
    enum class ComputeState { idle, createInput, createVariates, calc };

    class program {
    public:
        program() {}
        void clear() {
            args_.clear();
            op_.clear();
            resultId_.clear();
        }
        std::size_t size() const { return args_.size(); }
        void add(std::size_t resultId, std::size_t op, const std::vector<std::size_t>& args) {
            args_.push_back(args);
            op_.push_back(op);
            resultId_.push_back(resultId);
        }
        const std::vector<std::size_t>& args(std::size_t i) const { return args_[i]; }
        const std::size_t op(std::size_t i) const { return op_[i]; }
        const std::size_t resultId(std::size_t i) const { return resultId_[i]; }

    private:
        std::vector<std::vector<std::size_t>> args_;
        std::vector<std::size_t> op_;
        std::vector<std::size_t> resultId_;
    };
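    // The program class above acts as a recorded tape of operations: applyOperation() appends
    // (resultId, opCode, args) triples while a new calculation is being built, and
    // finalizeCalculation() later executes the tape entry by entry, also on replays of the same
    // id/version with fresh input values.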

    bool initialized_ = false;

    // will be accumulated over all calcs
    ComputeContext::DebugInfo debugInfo_;

    // 1a vectors per current calc id

    std::vector<std::size_t> size_;
    std::vector<std::size_t> version_;
    std::vector<bool> disposed_;
    std::vector<program> program_;
    std::vector<std::size_t> numberOfInputVars_;
    std::vector<std::size_t> numberOfVariates_;
    std::vector<std::size_t> numberOfVars_;
    std::vector<std::vector<std::size_t>> outputVars_;

    // 2 current calc

    std::size_t currentId_ = 0;
    ComputeState currentState_ = ComputeState::idle;
    Settings settings_;
    bool newCalc_;

    std::vector<RandomVariable> values_;
    std::vector<std::size_t> freedVariables_;

    // shared random variates for all calcs

    std::unique_ptr<QuantLib::MersenneTwisterUniformRng> rng_;
    QuantLib::InverseCumulativeNormal icn_;
    std::vector<RandomVariable> variates_;
};
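// Variable id layout used throughout this file: ids [0, numberOfInputVars) refer to input variables
// (stored at the front of values_), ids [numberOfInputVars, numberOfInputVars + numberOfVariates)
// refer to the shared variates_ pool, and larger ids refer to intermediate variables, which live in
// values_ directly behind the inputs. This is why variate ids are shifted by -numberOfInputVars and
// intermediate ids by -numberOfVariates when they are resolved in finalizeCalculation(). For example,
// with 3 inputs and 4 variates, id 5 resolves to variates_[2] and id 9 to values_[5].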

BasicCpuFramework::BasicCpuFramework() { contexts_["BasicCpu/Default/Default"] = new BasicCpuContext(); }

BasicCpuFramework::~BasicCpuFramework() {
    for (auto& [_, c] : contexts_) {
        delete c;
    }
}

BasicCpuContext::BasicCpuContext() : initialized_(false) {}

BasicCpuContext::~BasicCpuContext() {}

void BasicCpuContext::init() {

    if (initialized_) {
        return;
    }

    debugInfo_.numberOfOperations = 0;
    debugInfo_.nanoSecondsDataCopy = 0;
    debugInfo_.nanoSecondsProgramBuild = 0;
    debugInfo_.nanoSecondsCalculation = 0;

    initialized_ = true;
}

void BasicCpuContext::disposeCalculation(const std::size_t id) {
    QL_REQUIRE(!disposed_[id - 1], "BasicCpuContext::disposeCalculation(): id " << id << " was already disposed.");
    program_[id - 1].clear();
    disposed_[id - 1] = true;
}

std::pair<std::size_t, bool> BasicCpuContext::initiateCalculation(const std::size_t n, const std::size_t id,
                                                                  const std::size_t version, const Settings settings) {

    QL_REQUIRE(n > 0, "BasicCpuContext::initiateCalculation(): n must not be zero");

    newCalc_ = false;
    settings_ = settings;

    if (id == 0) {

        // initiate new calculation

        size_.push_back(n);
        version_.push_back(version);
        disposed_.push_back(false);
        program_.push_back(program());
        numberOfInputVars_.push_back(0);
        numberOfVariates_.push_back(0);
        numberOfVars_.push_back(0);
        outputVars_.push_back({});

        currentId_ = size_.size();
        newCalc_ = true;

    } else {

        // initiate calculation on existing id

        QL_REQUIRE(id <= size_.size(),
                   "BasicCpuContext::initiateCalculation(): id (" << id << ") invalid, expected 1..." << size_.size());
        QL_REQUIRE(size_[id - 1] == n, "BasicCpuContext::initiateCalculation(): size ("
                                           << size_[id - 1] << ") for id " << id << " does not match current size ("
                                           << n << ")");
        QL_REQUIRE(!disposed_[id - 1], "BasicCpuContext::initiateCalculation(): id ("
                                           << id << ") was already disposed, it can not be used any more.");

        if (version != version_[id - 1]) {
            version_[id - 1] = version;
            program_[id - 1].clear();
            numberOfInputVars_[id - 1] = 0;
            numberOfVariates_[id - 1] = 0;
            numberOfVars_[id - 1] = 0;
            outputVars_[id - 1].clear();
            newCalc_ = true;
        }

        currentId_ = id;
    }

    // reset variables

    numberOfInputVars_[currentId_ - 1] = 0;

    values_.clear();
    if (newCalc_)
        freedVariables_.clear();

    // set state

    currentState_ = ComputeState::createInput;

    // return calc id

    return std::make_pair(currentId_, newCalc_);
}
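// Usage note: a first call with id == 0 allocates a new calculation id and returns (id, true); the
// caller then records inputs, variates and operations. A later call with the same id and version
// returns (id, false), only fresh input values are expected, and the stored program is replayed in
// finalizeCalculation(). Passing a new version for an existing id clears the stored program and
// starts a new recording.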

std::size_t BasicCpuContext::createInputVariable(double v) {
    QL_REQUIRE(currentState_ == ComputeState::createInput,
               "BasicCpuContext::createInputVariable(): not in state createInput (" << static_cast<int>(currentState_)
                                                                                    << ")");
    values_.push_back(RandomVariable(size_[currentId_ - 1], v));
    return numberOfInputVars_[currentId_ - 1]++;
}

std::size_t BasicCpuContext::createInputVariable(double* v) {
    QL_REQUIRE(currentState_ == ComputeState::createInput,
               "BasicCpuContext::createInputVariable(): not in state createInput (" << static_cast<int>(currentState_)
                                                                                    << ")");
    values_.push_back(RandomVariable(size_[currentId_ - 1]));
    for (std::size_t i = 0; i < size_[currentId_ - 1]; ++i)
        values_.back().set(i, v[i]);
    return numberOfInputVars_[currentId_ - 1]++;
}

std::vector<std::vector<std::size_t>> BasicCpuContext::createInputVariates(const std::size_t dim,
                                                                           const std::size_t steps) {
    QL_REQUIRE(currentState_ == ComputeState::createInput || currentState_ == ComputeState::createVariates,
               "BasicCpuContext::createInputVariates(): not in state createInput or createVariates ("
                   << static_cast<int>(currentState_) << ")");
    QL_REQUIRE(currentId_ > 0, "BasicCpuContext::createInputVariates(): current id is not set");
    QL_REQUIRE(newCalc_, "BasicCpuContext::createInputVariates(): id (" << currentId_ << ") in version "
                                                                        << version_[currentId_ - 1]
                                                                        << " is replayed.");
    currentState_ = ComputeState::createVariates;

    if (rng_ == nullptr) {
        rng_ = std::make_unique<MersenneTwisterUniformRng>(settings_.rngSeed);
    }
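    // The variates are drawn once and cached in variates_, which is shared across all calculations on
    // this context: the cache is only extended when a calculation needs more variates than have been
    // generated so far, so every calculation sees the same underlying normal draws for a given slot.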

    if (variates_.size() < numberOfVariates_[currentId_ - 1] + dim * steps) {
        for (std::size_t i = variates_.size(); i < numberOfVariates_[currentId_ - 1] + dim * steps; ++i) {
            variates_.push_back(RandomVariable(size_[currentId_ - 1]));
            for (std::size_t j = 0; j < variates_.back().size(); ++j)
                variates_.back().set(j, icn_(rng_->nextReal()));
        }
    }

    std::vector<std::vector<std::size_t>> resultIds(dim, std::vector<std::size_t>(steps));
    for (std::size_t i = 0; i < dim; ++i) {
        for (std::size_t j = 0; j < steps; ++j) {
            resultIds[i][j] = numberOfInputVars_[currentId_ - 1] + numberOfVariates_[currentId_ - 1] + j * dim + i;
        }
    }

    numberOfVariates_[currentId_ - 1] += dim * steps;

    return resultIds;
}

std::size_t BasicCpuContext::applyOperation(const std::size_t randomVariableOpCode,
                                            const std::vector<std::size_t>& args) {
    QL_REQUIRE(currentState_ == ComputeState::createInput || currentState_ == ComputeState::createVariates ||
                   currentState_ == ComputeState::calc,
               "BasicCpuContext::applyOperation(): not in state createInput, createVariates or calc ("
                   << static_cast<int>(currentState_) << ")");
    currentState_ = ComputeState::calc;
    QL_REQUIRE(currentId_ > 0, "BasicCpuContext::applyOperation(): current id is not set");
    QL_REQUIRE(newCalc_, "BasicCpuContext::applyOperation(): id (" << currentId_ << ") in version "
                                                                   << version_[currentId_ - 1] << " is replayed.");

    // determine variable id to use for result

    std::size_t resultId;
    if (!freedVariables_.empty()) {
        resultId = freedVariables_.back();
        freedVariables_.pop_back();
    } else {
        resultId =
            numberOfInputVars_[currentId_ - 1] + numberOfVariates_[currentId_ - 1] + numberOfVars_[currentId_ - 1]++;
    }

    // store operation

    program_[currentId_ - 1].add(resultId, randomVariableOpCode, args);

    // update num of ops in debug info

    if (settings_.debug)
        debugInfo_.numberOfOperations += 1 * size_[currentId_ - 1];

    // return result id

    return resultId;
}

void BasicCpuContext::freeVariable(const std::size_t id) {
    QL_REQUIRE(currentState_ == ComputeState::calc,
               "BasicCpuContext::freeVariable(): not in state calc (" << static_cast<int>(currentState_) << ")");
    QL_REQUIRE(currentId_ > 0, "BasicCpuContext::freeVariable(): current id is not set");
    QL_REQUIRE(newCalc_, "BasicCpuContext::freeVariable(): id (" << currentId_ << ") in version "
                                                                 << version_[currentId_ - 1] << " is replayed.");

    // we do not free variates, since they are shared

    if (id >= numberOfInputVars_[currentId_ - 1] &&
        id < numberOfInputVars_[currentId_ - 1] + numberOfVariates_[currentId_ - 1])
        return;

    freedVariables_.push_back(id);
}
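// Freed ids collected here are recycled by applyOperation() for subsequent results, which keeps the
// values_ vector small for long programs with many short-lived intermediate variables.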

void BasicCpuContext::declareOutputVariable(const std::size_t id) {
    QL_REQUIRE(currentState_ != ComputeState::idle, "BasicCpuContext::declareOutputVariable(): state is idle");
    QL_REQUIRE(currentId_ > 0, "BasicCpuContext::declareOutputVariable(): current id not set");
    QL_REQUIRE(newCalc_, "BasicCpuContext::declareOutputVariable(): id ("
                             << currentId_ << ") in version " << version_[currentId_ - 1] << " is replayed.");
    outputVars_[currentId_ - 1].push_back(id);
}

void BasicCpuContext::finalizeCalculation(std::vector<double*>& output) {
    struct exitGuard {
        exitGuard() {}
        ~exitGuard() { *currentState = ComputeState::idle; }
        ComputeState* currentState;
    } guard;

    guard.currentState = &currentState_;

    QL_REQUIRE(currentId_ > 0, "BasicCpuContext::finalizeCalculation(): current id is not set");
    QL_REQUIRE(output.size() == outputVars_[currentId_ - 1].size(),
               "BasicCpuContext::finalizeCalculation(): output size ("
                   << output.size() << ") inconsistent to kernel output size (" << outputVars_[currentId_ - 1].size()
                   << ")");

    const auto& p = program_[currentId_ - 1];

    auto ops = getRandomVariableOps(size_[currentId_ - 1], settings_.regressionOrder);

    // resize values vector to required size

    values_.resize(numberOfInputVars_[currentId_ - 1] + numberOfVars_[currentId_ - 1]);

    // execute calculation

    for (Size i = 0; i < program_[currentId_ - 1].size(); ++i) {
        std::vector<const RandomVariable*> args(p.args(i).size());
        for (Size j = 0; j < p.args(i).size(); ++j) {
            if (p.args(i)[j] < numberOfInputVars_[currentId_ - 1])
                args[j] = &values_[p.args(i)[j]];
            else if (p.args(i)[j] < numberOfInputVars_[currentId_ - 1] + numberOfVariates_[currentId_ - 1])
                args[j] = &variates_[p.args(i)[j] - numberOfInputVars_[currentId_ - 1]];
            else
                args[j] = &values_[p.args(i)[j] - numberOfVariates_[currentId_ - 1]];
        }
        if (p.resultId(i) < numberOfInputVars_[currentId_ - 1])
            values_[p.resultId(i)] = ops[p.op(i)](args);
        else if (p.resultId(i) >= numberOfInputVars_[currentId_ - 1] + numberOfVariates_[currentId_ - 1])
            values_[p.resultId(i) - numberOfVariates_[currentId_ - 1]] = ops[p.op(i)](args);
        else {
            QL_FAIL("BasicCpuContext::finalizeCalculation(): internal error, result id "
                    << p.resultId(i) << " does not fall into values array.");
        }
    }

    // fill output

    for (Size i = 0; i < outputVars_[currentId_ - 1].size(); ++i) {
        std::size_t id = outputVars_[currentId_ - 1][i];
        RandomVariable* v;
        if (id < numberOfInputVars_[currentId_ - 1])
            v = &values_[id];
        else if (id < numberOfInputVars_[currentId_ - 1] + numberOfVariates_[currentId_ - 1])
            v = &variates_[id - numberOfInputVars_[currentId_ - 1]];
        else
            v = &values_[id - numberOfVariates_[currentId_ - 1]];
        for (Size j = 0; j < size_[currentId_ - 1]; ++j) {
            output[i][j] = v->operator[](j);
        }
    }
}

const ComputeContext::DebugInfo& BasicCpuContext::debugInfo() const { return debugInfo_; }

std::set<std::string> BasicCpuFramework::getAvailableDevices() const { return {"BasicCpu/Default/Default"}; }

ComputeContext* BasicCpuFramework::getContext(const std::string& deviceName) {
    QL_REQUIRE(deviceName == "BasicCpu/Default/Default",
               "BasicCpuFramework::getContext(): device '"
                   << deviceName << "' not supported. Available device is 'BasicCpu/Default/Default'.");
    return contexts_[deviceName];
}

} // namespace QuantExt
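As a usage note, the following sketch shows how client code might drive this context through the ComputeContext interface, first recording a small program and then replaying it with fresh inputs. It is not part of the file above: the header path qle/math/basiccpuenvironment.hpp is assumed, and the opcode is passed in as a parameter (addOpCode) because the concrete opcode constants accepted by applyOperation are defined elsewhere.

#include <qle/math/basiccpuenvironment.hpp>

#include <vector>

void example(std::size_t addOpCode) {
    using namespace QuantExt;

    BasicCpuFramework framework;
    ComputeContext* ctx = framework.getContext("BasicCpu/Default/Default");
    ctx->init();

    // first call with id == 0: a new id is allocated and the program is recorded
    std::size_t id = ctx->initiateCalculation(4).first; // 4 samples per variable
    std::size_t a = ctx->createInputVariable(2.0);      // constant input
    double spots[] = {1.0, 2.0, 3.0, 4.0};
    std::size_t b = ctx->createInputVariable(spots);    // per-sample input
    std::size_t c = ctx->applyOperation(addOpCode, {a, b});
    ctx->declareOutputVariable(c);

    std::vector<double> result(4);
    std::vector<double*> output = {result.data()};
    ctx->finalizeCalculation(output); // result[i] == 2.0 + spots[i] if addOpCode denotes addition

    // second call with the same id and version: the stored program is replayed,
    // only fresh input values are supplied
    ctx->initiateCalculation(4, id);
    ctx->createInputVariable(3.0);
    ctx->createInputVariable(spots);
    ctx->finalizeCalculation(output); // now result[i] == 3.0 + spots[i]
}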