hullwhitebucketing.cpp
/*
 Copyright (C) 2017 Quaternion Risk Management Ltd
 All rights reserved.

 This file is part of ORE, a free-software/open-source library
 for transparent pricing and risk analysis - http://opensourcerisk.org

 ORE is free software: you can redistribute it and/or modify it
 under the terms of the Modified BSD License. You should have received a
 copy of the license along with this program.
 The license is also available online at <http://opensourcerisk.org>

 This program is distributed on the basis that it will form a useful
 contribution to risk analytics and model standardisation, but WITHOUT
 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 FITNESS FOR A PARTICULAR PURPOSE. See the license for more details.
*/

#include "toplevelfixture.hpp"
#include <boost/test/unit_test.hpp>
#include <oret/datapaths.hpp>
#include <qle/models/hullwhitebucketing.hpp>

#include <ql/math/comparison.hpp>
#include <ql/math/integrals/all.hpp>
#include <ql/math/randomnumbers/mt19937uniformrng.hpp>
#include <ql/experimental/credit/lossdistribution.hpp>
#include <ql/math/distributions/normaldistribution.hpp>
#include <ql/experimental/credit/distribution.hpp>
#include <boost/math/distributions/binomial.hpp>
#include <iostream>
#include <fstream>
#include <boost/timer/timer.hpp>

using namespace QuantLib;
using namespace QuantExt;

using std::fixed;
using std::setprecision;
using std::string;
using std::vector;

namespace test_hullwhitebucketing {

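// Computes the exact portfolio loss distribution by full enumeration: for each
// obligor we branch over its possible states (state probabilities in *beginPDs,
// state losses in *beginLGDs), multiply the running path density and accumulate
// the running loss; at the end of the recursion the path density is added to the
// discrete distribution map. The cost grows like states^obligors, so this is
// only feasible for the small test baskets below, where it serves as an exact
// reference for the bucketing results.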
void computeDiscreteDistribution(std::vector<std::vector<double>>::iterator beginPDs,
                                 std::vector<std::vector<double>>::iterator endPDs,
                                 std::vector<std::vector<double>>::iterator beginLGDs, double runningDensity,
                                 double runningLoss, std::map<double, double>& dist) {
    if (beginPDs == endPDs) {
        // all obligors processed: add this path's density at the accumulated loss level
        dist[runningLoss] = dist[runningLoss] + runningDensity;
    } else {
        for (size_t eventId = 0; eventId < beginPDs->size(); eventId++) {
            double loss = runningLoss + beginLGDs->at(eventId);
            double density = runningDensity * beginPDs->at(eventId);
            computeDiscreteDistribution(beginPDs + 1, endPDs, beginLGDs + 1, density, loss, dist);
        }
    }
}

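// Exact loss distributions via full enumeration. In the single-state version
// each obligor either defaults (probability pd, loss lgd) or survives
// (probability 1 - pd, loss 0). In the multi-state version an obligor defaults
// into state j with probability pds[j] and loss lgds[j]; the survival state
// (loss 0) is appended with the residual probability 1 - sum_j pds[j].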
std::map<double, double> lossDistribution(const std::vector<double>& pds, const std::vector<double>& lgds) {

    QL_REQUIRE(pds.size() == lgds.size(), "Mismatched number of pds and lgds");

    vector<vector<double>> statePDs;
    vector<vector<double>> stateLGDs;
    for (const auto& pd : pds) {
        statePDs.push_back({pd, 1 - pd});
    }
    for (const auto& lgd : lgds) {
        stateLGDs.push_back({lgd, 0});
    }
    std::map<double, double> dist;
    computeDiscreteDistribution(statePDs.begin(), statePDs.end(), stateLGDs.begin(), 1.0, 0.0, dist);
    return dist;
}

std::map<double, double> lossDistribution(const std::vector<vector<double>>& pds,
                                          const std::vector<vector<double>>& lgds) {

    QL_REQUIRE(pds.size() == lgds.size(), "Mismatched number of pds and lgds");

    vector<vector<double>> statePDs;
    vector<vector<double>> stateLGDs;
    for (const auto& pd : pds) {
        statePDs.push_back(pd);
        statePDs.back().push_back(1.0 - std::accumulate(pd.begin(), pd.end(), 0.0));
    }
    for (const auto& lgd : lgds) {
        stateLGDs.push_back(lgd);
        stateLGDs.back().push_back(0);
    }

    std::map<double, double> dist;
    computeDiscreteDistribution(statePDs.begin(), statePDs.end(), stateLGDs.begin(), 1.0, 0.0, dist);
    return dist;
}

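// Helper around the HullWhiteBucketing results. The first constructor re-buckets
// an exact map-based distribution for comparison: within each bucket the
// probability mass d is summed and the conditional average loss A is updated as
// a probability-weighted mean. The second constructor copies the arrays produced
// by a HullWhiteBucketing computation. Bucket 0 is the (-inf, min) bucket, so
// lowerBound is the vector of upper bounds shifted by one position.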
struct BucketedDistribution {
    BucketedDistribution(const HullWhiteBucketing& hwb, const std::map<double, double>& dist)
        : upperBound(hwb.upperBucketBound()), p(hwb.buckets(), 0.0), A(hwb.buckets(), 0.0) {
        lowerBound.push_back(QL_MIN_REAL);
        lowerBound.insert(lowerBound.end(), upperBound.begin(), upperBound.end() - 1);

        for (const auto& [l, d] : dist) {
            size_t idx = hwb.index(l);
            if (!close_enough(d, 0.0)) {
                A[idx] = (A[idx] * p[idx] + d * l) / (d + p[idx]);
                p[idx] = p[idx] + d;
            }
        }
    }

    BucketedDistribution(const HullWhiteBucketing& hwb)
        : upperBound(hwb.upperBucketBound()), p(hwb.probability().begin(), hwb.probability().end()),
          A(hwb.averageLoss().begin(), hwb.averageLoss().end()) {
        lowerBound.push_back(QL_MIN_REAL);
        lowerBound.insert(lowerBound.end(), upperBound.begin(), upperBound.end() - 1);
    }

    std::vector<double> lowerBound;
    std::vector<double> upperBound;
    vector<double> p;
    vector<double> A;

    // Convert to a QuantLib::Distribution on [minLoss, maxLoss); bucket i of the
    // result corresponds to bucket i + 1 here, dropping the leading (-inf, min) bucket.
    QuantLib::Distribution lossDistribution(size_t nBuckets, Real minLoss, Real maxLoss) {
        QuantLib::Distribution dist(nBuckets, minLoss, maxLoss);
        // Add from pd and losses [min, min+dx), ... [max-dx, max)
        for (size_t i = 0; i < nBuckets; i++) {
            dist.addDensity(i, p[i + 1] / dist.dx(i));
            dist.addAverage(i, A[i + 1]);
        }
        return dist;
    }
    friend std::ostream& operator<<(std::ostream& os, const BucketedDistribution& dist);
};

std::ostream& operator<<(std::ostream& os, const BucketedDistribution& dist) {
    os << "#\tLB\tUB\tPD\tAvg" << std::endl;
    for (Size i = 0; i < dist.A.size(); ++i) {
        os << i << "\t" << dist.lowerBound[i] << "\t" << dist.upperBound[i] << "\t" << dist.p[i] << "\t" << dist.A[i]
           << std::endl;
    }
    return os;
}

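// Expected loss E[min(L, D)] of the exact map-based distribution for a tranche
// attaching at zero: losses up to the detachment point D contribute in full,
// larger losses are capped at D.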
double expectedTrancheLoss(const std::map<double, double>& dist, double detachmentPoint) {
    double expectedLoss = 0.0;
    for (const auto& [L, pd] : dist) {
        if (L <= detachmentPoint) {
            expectedLoss += L * pd;
        } else {
            expectedLoss += detachmentPoint * pd;
        }
    }
    return expectedLoss;
}

double expectedTrancheLoss(QuantLib::Distribution dist, double attachmentAmount, double detachmentAmount) {
    QuantLib::Real expectedLoss = 0;
    dist.normalize();
    for (QuantLib::Size i = 0; i < dist.size(); i++) {
        // Real x = dist.x(i) + dist.dx(i)/2; // in QL distribution.cpp
        QuantLib::Real x = dist.average(i);
        if (x < attachmentAmount)
            continue;
        if (x > detachmentAmount)
            break;
        expectedLoss += (x - attachmentAmount) * dist.dx(i) * dist.density(i);
    }
    expectedLoss += (detachmentAmount - attachmentAmount) * (1.0 - dist.cumulativeDensity(detachmentAmount));
    return expectedLoss;
}

double expectedTrancheLoss(const vector<double>& prob, const vector<double>& loss, double detachment) {
    QuantLib::Real expectedLoss = 0;
    for (size_t i = 0; i < prob.size(); ++i) {
        expectedLoss += std::min(loss[i], detachment) * prob[i];
    }
    return expectedLoss;
}

} // namespace test_hullwhitebucketing

BOOST_FIXTURE_TEST_SUITE(QuantExtTestSuite, qle::test::TopLevelFixture)

BOOST_AUTO_TEST_SUITE(HullWhiteBucketingTestSuite)

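// For a homogeneous basket of L obligors with identical default probability pd
// and unit loss, the number of defaults is Binomial(L, pd), so with unit-width
// buckets the bucketed probabilities should match the binomial pdf bucket by
// bucket, and the last bucket should collect the tail mass 1 - cdf(i - 1).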
BOOST_AUTO_TEST_CASE(testHullWhiteBucketing) {

    BOOST_TEST_MESSAGE("Testing Hull White Bucketing...");

    boost::timer::cpu_timer timer;

    Size N = 100; // buckets
    Size L = 100; // obligors
    Real pd = 0.01;

    std::vector<Real> buckets;
    for (Size i = 0; i <= N; ++i) {
        buckets.push_back(0.5 + static_cast<Real>(i));
    }

    std::vector<Real> pds(L, pd), losses(L, 1.0);

    HullWhiteBucketing hw(buckets.begin(), buckets.end());
    hw.compute(pds.begin(), pds.end(), losses.begin());

    const Array& p = hw.probability();
    const Array& A = hw.averageLoss();

    boost::math::binomial ref(L, pd);

    for (Size i = 0; i < Size(std::min(Size(p.size()), Size(15))); ++i) {
        if (i < p.size() - 1) {
            BOOST_TEST_MESSAGE("Bucket " << i << " ..." << hw.upperBucketBound()[i] << ": p = " << p[i]
                                         << " A = " << A[i] << " ref = " << boost::math::pdf(ref, i));
            BOOST_CHECK_CLOSE(p[i], boost::math::pdf(ref, i), 1E-10);
            BOOST_CHECK_CLOSE(A[i], static_cast<Real>(i), 1E-10);
        } else {
            BOOST_TEST_MESSAGE("Bucket " << i << " ..." << hw.upperBucketBound()[i] << ": p = " << p[i]
                                         << " A = " << A[i] << " ref = " << 1.0 - boost::math::cdf(ref, i - 1));
            BOOST_CHECK_CLOSE(p[i], 1.0 - boost::math::cdf(ref, i - 1), 1E-6);
        }
    }

    BOOST_TEST_MESSAGE("Elapsed: " << timer.elapsed().wall * 1e-9);
}

BOOST_AUTO_TEST_CASE(testHullWhiteBucketingQuantLib) {

    BOOST_TEST_MESSAGE("Testing Hull White Bucketing in QuantLib...");

    boost::timer::cpu_timer timer;

    Size N = 100; // buckets
    Size L = 100; // obligors
    Real pd = 0.01;

    // L buckets of width 1
    QuantLib::ext::shared_ptr<QuantLib::LossDist> bucketing =
        QuantLib::ext::make_shared<QuantLib::LossDistBucketing>(N + 1, N + 1);

    std::vector<Real> pds(L, pd), losses(L, 1.0);

    QuantLib::Distribution dist = (*bucketing)(losses, pds);

    boost::math::binomial ref(L, pd);

    for (Size i = 0; i <= Size(std::min(Size(N), Size(15))); ++i) {
        Real p = dist.density(i) * dist.dx(i);
        Real A = dist.average(i);
        Real x = dist.x(i);
        BOOST_TEST_MESSAGE("Bucket " << i << " ..." << x << ": p = " << p << " A = " << A
                                     << " ref = " << boost::math::pdf(ref, i));
        BOOST_CHECK_CLOSE(p, boost::math::pdf(ref, i), 1E-10);
        BOOST_CHECK_CLOSE(A, static_cast<Real>(i), 1E-10);
    }

    BOOST_TEST_MESSAGE("Elapsed: " << timer.elapsed().wall * 1e-9);
}

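// Multi-state case: each obligor independently defaults into state j with
// probability pd[j] and loss l[j] (here a 1% chance of losing 1.0 and a 2%
// chance of losing 2.0). The bucketed distribution is checked against a Monte
// Carlo simulation that draws one uniform per obligor and maps it to a default state.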
BOOST_AUTO_TEST_CASE(testHullWhiteBucketingMultiState) {
    BOOST_TEST_MESSAGE("Testing Multi State Hull White Bucketing...");

    Size N = 10;  // buckets
    Size L = 100; // obligors

    std::vector<Real> buckets;
    for (Size i = 0; i <= N; ++i) {
        buckets.push_back(0.5 + static_cast<Real>(i));
    }

    std::vector<Real> pd = {0.01, 0.02};
    std::vector<Real> l = {1.0, 2.0};
    std::vector<std::vector<Real>> pds(L, pd), losses(L, l);

    HullWhiteBucketing hw(buckets.begin(), buckets.end());
    hw.computeMultiState(pds.begin(), pds.end(), losses.begin());

    const Array& p = hw.probability();
    const Array& A = hw.averageLoss();

    // generate reference results with monte carlo
    Array ref(p.size(), 0.0);
    MersenneTwisterUniformRng mt(42);
    Size paths = 1000000;
    for (Size k = 0; k < paths; ++k) {
        Real loss = 0.0;
        for (Size ll = 0; ll < L; ++ll) {
            Real r = mt.nextReal();
            if (r < pd[0])
                loss += l[0];
            else if (r < pd[0] + pd[1])
                loss += l[1];
        }
        Size idx = hw.index(loss);
        ref[idx] += 1.0;
    }
    ref /= paths;

    // check against reference results
    for (Size i = 0; i < p.size(); ++i) {
        Real diff = p[i] - ref[i];
        BOOST_TEST_MESSAGE("Bucket " << i << " ..." << hw.upperBucketBound()[i] << ": p = " << p[i] << " A = " << A[i]
                                     << " ref = " << ref[i] << " pdiff " << std::scientific << diff);
        BOOST_CHECK_CLOSE(p[i], ref[i], 1.5);
        if (i < p.size() - 1)
            BOOST_CHECK_CLOSE(A[i], static_cast<Real>(i), 1E-10);
    }
}

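// Edge case: several default states with identical losses must reproduce the
// single-state result with the aggregated default probability
// 0.005 + 0.01 + 0.005 = 0.02, in both the probabilities and the conditional
// average losses.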
BOOST_AUTO_TEST_CASE(testHullWhiteBucketingMultiStateEdgeCase) {
    BOOST_TEST_MESSAGE("Testing Multi State Hull White Bucketing, edge case with different probabilities and "
                       "identical losses...");

    Size N = 10;  // buckets
    Size L = 100; // obligors

    std::vector<Real> buckets;
    for (Size i = 0; i <= N; ++i) {
        buckets.push_back(0.5 + static_cast<Real>(i));
    }

    std::vector<Real> pd = {0.005, 0.01, 0.005};
    std::vector<Real> l = {1.0, 1.0, 1.0};
    std::vector<std::vector<Real>> pds(L, pd), losses(L, l);

    HullWhiteBucketing hw(buckets.begin(), buckets.end());
    hw.computeMultiState(pds.begin(), pds.end(), losses.begin());

    const Array& p = hw.probability();
    const Array& A = hw.averageLoss();

    // Equivalent single state
    std::vector<Real> pds1(L, 0.02);
    std::vector<Real> losses1(L, 1.0);

    HullWhiteBucketing hw1(buckets.begin(), buckets.end());
    hw1.compute(pds1.begin(), pds1.end(), losses1.begin());

    const Array& p1 = hw1.probability();
    const Array& A1 = hw1.averageLoss();

    // check consistency
    for (Size i = 0; i < p.size(); ++i) {
        Real diffp = p[i] - p1[i];
        Real diffA = A[i] - A1[i];
        BOOST_TEST_MESSAGE("Bucket " << i << " ..." << hw.upperBucketBound()[i] << ": p = " << p[i] << " A = " << A[i]
                                     << " pdiff " << std::scientific << diffp << " Adiff = " << diffA);
        BOOST_CHECK_CLOSE(p[i], p1[i], 0.01);
        if (i < p.size() - 1) {
            BOOST_CHECK_CLOSE(A[i], static_cast<Real>(i), 1E-10);
            BOOST_CHECK_CLOSE(A1[i], static_cast<Real>(i), 1E-10);
        }
    }
}

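// The two loss vectors below have the same probability-weighted loss per obligor
// (sum_j pd[j] * l[j] = 0.1 in both cases), so the loss distributions differ but
// the total expected loss must coincide.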
BOOST_AUTO_TEST_CASE(testHullWhiteBucketingMultiStateExpectedLoss) {
    BOOST_TEST_MESSAGE("Testing Multi State Hull White Bucketing, expected loss...");

    Size N = 80;  // buckets
    Size L = 100; // obligors

    std::vector<Real> buckets;
    for (Size i = 0; i <= N; ++i) {
        buckets.push_back(0.5 + static_cast<Real>(i));
    }

    std::vector<Real> pd = {0.02, 0.01, 0.02};
    std::vector<Real> l1 = {2.0, 2.0, 2.0};
    std::vector<Real> l2 = {1.0, 2.0, 3.0};
    std::vector<std::vector<Real>> pds(L, pd), losses1(L, l1), losses2(L, l2);

    HullWhiteBucketing hw1(buckets.begin(), buckets.end());
    hw1.computeMultiState(pds.begin(), pds.end(), losses1.begin());
    const Array& p1 = hw1.probability();
    const Array& A1 = hw1.averageLoss();

    HullWhiteBucketing hw2(buckets.begin(), buckets.end());
    hw2.computeMultiState(pds.begin(), pds.end(), losses2.begin());
    const Array& p2 = hw2.probability();
    const Array& A2 = hw2.averageLoss();

    Real el1 = 0.0, el2 = 0.0;
    for (Size i = 0; i < p1.size(); ++i) {
        el1 += p1[i] * A1[i];
        el2 += p2[i] * A2[i];
    }

    for (Size i = 0; i < p1.size(); ++i) {
        BOOST_TEST_MESSAGE("Bucket " << i << " ..." << hw1.upperBucketBound()[i] << ":" << std::scientific
                                     << " p = " << p1[i] << " " << p2[i] << " " << p1[i] - p2[i]);
    }
    // check consistency: the different loss levels change the loss distribution,
    // but the balanced setup should keep expected losses unchanged
    BOOST_TEST_MESSAGE("Expected loss: " << std::scientific << el1 << " " << el2 << " " << el1 - el2);
    BOOST_CHECK_CLOSE(el1, el2, 1e-12);
}

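// Array-based expected tranche loss: buckets whose average loss lies in
// [attach, detach] contribute (x - attach) * p, and the capped payout
// (detach - attach) is added for the tail probability beyond the detachment
// point, read off the cumulative distribution.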
Real expectedTrancheLoss(Real attach, Real detach, const Array& p, const Array& cumu, const Array& loss) {
    Real expectedLoss = 0;
    QL_REQUIRE((p.size() == cumu.size()) && (p.size() == loss.size()), "array size mismatch");
    Size i;
    for (i = 0; i < p.size(); ++i) {
        Real x = loss[i];
        if (x < attach)
            continue;
        if (x > detach)
            break;
        expectedLoss += (x - attach) * p[i];
    }
    expectedLoss += (detach - attach) * (1.0 - cumu[i - 1]);
    return expectedLoss;
}

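// Prices a stylized CDX equity tranche [0, detachmentRatio] on a homogeneous
// 100-name basket in a one-factor Gaussian copula with multi-state losses
// (a stochastic recovery grid in the spirit of Krekel / Markit, see the
// commented loss vectors below). Conditional on the systemic factor m the
// obligors are independent, so the conditional loss distribution can be bucketed
// with the Hull-White algorithm; the unconditional distribution is obtained by
// integrating over the standard normal density of m, and a conditional Monte
// Carlo simulation serves as reference for the expected tranche loss.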
void run_case(std::vector<Real> l, std::string fileName, double detachmentRatio = 0.2) {
    QL_REQUIRE(l.size() == 3, "three losses required");
    // loss sizes in increasing order
    for (Size i = 1; i < l.size(); ++i)
        QL_REQUIRE(l[i] >= l[i - 1], "increasing losses required");

    BOOST_TEST_MESSAGE("Testing Multi State Hull White Bucketing, expected tranche loss for stylized CDX: "
                       << l[0] << " " << l[1] << " " << l[2]);

    Size bucketsFullBasket = 400; // buckets
    Size bucketsTranche = 100;
    Size L = 100; // obligors with notional 1 each
    Real rho = 0.75;
    Real attachmentRatio = 0.0;
    Real a = attachmentRatio * L;
    Real d = detachmentRatio * L;
    Real pd0 = 0.04;       // same PD across all entities, 0.01 for IG, 0.04 for HY
    Real cutoff = 1.0 * L; // full basket cutoff; a value just beyond the detachment point
                           // would suffice if only tranche expectations were needed
    std::vector<Real> pd = {pd0 * 0.35, pd0 * 0.3, pd0 * 0.35}; // Markit 2020 grid
    // std::vector<Real> l_1 = {0.6, 0.6, 0.6}; // rr 0.4
    // std::vector<Real> l_2 = {0.9, 0.6, 0.3}; // rr 0.1, 0.4, 0.7
    // std::vector<Real> l_3 = {0.3, 0.6, 0.9}; // rr 0.7, 0.4, 0.1 (to be used according to Krekel & Markit)
    // std::vector<Real> l = l_1; // pick one of the loss vectors
    std::vector<std::vector<Real>> pds(L, pd), losses(L, l);

    // Marginal loss distribution, i.e. rho = 0

    HullWhiteBucketing hw0(0.0, cutoff, bucketsFullBasket);
    hw0.computeMultiState(pds.begin(), pds.end(), losses.begin());
    Array p0 = hw0.probability();
    Array A0 = hw0.averageLoss();

    // Compute thresholds of the Gaussian copula model: q[i][j] is the probability
    // of obligor i defaulting into state j or any later state, and the associated
    // latent variable threshold is c[i][j] = N^{-1}(q[i][j])

    InverseCumulativeNormal ICN;
    CumulativeNormalDistribution CN;
    std::vector<std::vector<Real>> c(L, std::vector<Real>(pd.size() + 1, 0.0));
    std::vector<std::vector<Real>> q(L, std::vector<Real>(pd.size() + 1, pd0));
    for (Size i = 0; i < c.size(); ++i) {
        c[i][0] = ICN(q[i][0]);
        Real sum = 0.0;
        for (Size j = 0; j < pd.size(); ++j) {
            sum += pd[j] / pd0;
            q[i][j + 1] = q[i][0] * (1.0 - sum);
            if (close_enough(q[i][j + 1], 0.0))
                c[i][j + 1] = QL_MIN_REAL;
            else
                c[i][j + 1] = ICN(q[i][j + 1]);
        }
        QL_REQUIRE(fabs(q[i].back()) < 1e-10, "expected zero qij, but found " << q[i].back() << " for i=" << i);
    }

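    // Integrate the conditional results over the systemic factor m on a uniform
    // grid [-5, 5] with mSteps + 1 = 64 points, weighting each point by the
    // standard normal density phi(m) * dm; "norm" accumulates the integrated
    // weights as a sanity check that the grid captures essentially all the mass.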
    Size mSteps = 63;
    Real mMin = -5.0;
    Real mMax = 5.0;
    Real dm = (mMax - mMin) / mSteps;
    std::vector<std::vector<Real>> cpds = pds; // just to allocate the correct size

    Array p(p0.size(), 0.0);
    Array A(A0.size(), 0.0);

    Array pTranche(bucketsTranche + 2, 0.0);
    Array ATranche(bucketsTranche + 2, 0.0);

    Array pref(p0.size(), 0.0);
    Real norm = 0.0;
    double trancheLossMC = 0;
    for (Size k = 0; k <= mSteps; ++k) { // copula loop
        BOOST_TEST_MESSAGE("Copula loop " << k << "/" << mSteps);
        Real m = mMin + dm * k;
        Real mDensity = exp(-m * m / 2) / sqrt(2.0 * M_PI);
        norm += mDensity * dm;

        // Compute conditional PDs given the factor m: the conditional probability
        // of the latent variable falling between thresholds c[i][j] and c[i][j-1] is
        // CN((c[i][j-1] - sqrt(rho) m) / sqrt(1 - rho)) - CN((c[i][j] - sqrt(rho) m) / sqrt(1 - rho))

        for (Size i = 0; i < c.size(); i++) {
            Real cpd0 = CN((c[i][0] - sqrt(rho) * m) / sqrt(1.0 - rho));
            Real sum = 0.0;
            for (Size j = 1; j < c[i].size(); ++j) {
                // this ordering is consistent; it also assumes that cpds[i][0] is associated with the largest loss
                cpds[i][j - 1] = CN((c[i][j - 1] - sqrt(rho) * m) / sqrt(1.0 - rho)) -
                                 CN((c[i][j] - sqrt(rho) * m) / sqrt(1.0 - rho));
                sum += cpds[i][j - 1];
            }
            QL_REQUIRE(fabs(sum - cpd0) < 1e-10,
                       "probability check failed for factor " << m << ": " << sum << " vs " << cpd0);
        }

        // Loss distribution conditional on m - bucketing

        HullWhiteBucketing hwm(0.0, cutoff, bucketsFullBasket);
        hwm.computeMultiState(cpds.begin(), cpds.end(), losses.begin());
        Array pm = hwm.probability();
        Array Am = hwm.averageLoss();

        HullWhiteBucketing hwmTranche(0.0, d, bucketsTranche);
        hwmTranche.computeMultiState(cpds.begin(), cpds.end(), losses.begin());
        Array pmTranche = hwmTranche.probability();
        Array AmTranche = hwmTranche.averageLoss();

        // Loss distribution conditional on m - Monte Carlo, inverting the state
        // probabilities from the last state backwards
        Array pmc(pm.size(), 0.0);
        MersenneTwisterUniformRng mt(42);
        Size paths = 50000;
        double mLossMC = 0;
        for (Size kk = 0; kk < paths; ++kk) {
            Real loss = 0.0;
            for (Size ll = 0; ll < L; ++ll) {
                Real r = mt.nextReal();
                Real sum = 0.0;
                Size n = cpds[ll].size();
                for (Size mm = 0; mm < cpds[ll].size(); ++mm) {
                    sum += cpds[ll][n - 1 - mm];
                    if (r < sum) {
                        loss += losses[ll][n - 1 - mm];
                        break;
                    }
                }
            }
            mLossMC += std::min(loss, d);
            Size idx = hwm.index(loss);
            pmc[idx] += 1.0;
        }
        pmc /= paths;
        trancheLossMC += mLossMC * dm * mDensity / paths;

        // Aggregate the conditional distributions with weight phi(m) * dm
        for (Size j = 0; j < p.size(); j++) {
            BOOST_CHECK_MESSAGE(Am[j] >= 0.0, "averageLoss[" << j << "] " << Am[j] << " at k=" << k);
            BOOST_CHECK_MESSAGE(pm[j] >= 0.0 && pm[j] <= 1.0, "probability[" << j << "] " << pm[j] << " at k=" << k);
            p[j] += pm[j] * mDensity * dm;
            A[j] += Am[j] * mDensity * dm;

            pref[j] += pmc[j] * mDensity * dm;
        }
        for (Size j = 0; j < pTranche.size(); ++j) {
            pTranche[j] += pmTranche[j] * mDensity * dm;
            ATranche[j] += AmTranche[j] * mDensity * dm;
        }
    }
    BOOST_CHECK_CLOSE(norm, 1.0, 0.1);
    Real el0 = 0.0, el = 0.0, /*elref = 0.0, */ sum0 = 0.0, sum = 0.0;

    Distribution refDistribution(bucketsFullBasket, 0, cutoff);
    Distribution hwDistribution(bucketsFullBasket, 0, cutoff);
    for (Size i = 0; i < bucketsFullBasket; i++) {
        hwDistribution.addDensity(i, p[i + 1] / hwDistribution.dx(i));
        hwDistribution.addAverage(i, A[i + 1]);
        refDistribution.addDensity(i, pref[i + 1] / hwDistribution.dx(i));
        refDistribution.addAverage(i, A[i + 1]);
    }

    Distribution hwDistributionTranche(bucketsTranche, a, d);
    for (Size i = 0; i < bucketsTranche; i++) {
        hwDistributionTranche.addDensity(i, pTranche[i + 1] / hwDistributionTranche.dx(i));
        hwDistributionTranche.addAverage(i, ATranche[i + 1]);
    }

    double calculatedLossTrancheHWFullBucketing = test_hullwhitebucketing::expectedTrancheLoss(hwDistribution, a, d);
    double calculatedLossTrancheHWTrancheBucketing =
        test_hullwhitebucketing::expectedTrancheLoss(hwDistributionTranche, a, d);

    BOOST_TEST_MESSAGE("Expected tranche loss (MC) " << trancheLossMC);
    BOOST_TEST_MESSAGE("Calculated tranche loss (HW bucketing over full basket) "
                       << calculatedLossTrancheHWFullBucketing);
    BOOST_TEST_MESSAGE("Calculated tranche loss (HW bucketing of the tranche) "
                       << calculatedLossTrancheHWTrancheBucketing);

    BOOST_CHECK_CLOSE(trancheLossMC, calculatedLossTrancheHWFullBucketing, 0.25);
    BOOST_CHECK_CLOSE(trancheLossMC, calculatedLossTrancheHWTrancheBucketing, 0.25);

    // Calculate full basket expected loss
    for (Size i = 0; i < p.size(); ++i) {
        el0 += p0[i] * A0[i];
        el += p[i] * A[i];
        sum0 += p0[i];
        sum += p[i];
    }

    std::ofstream file;
    if (fileName != "")
        file.open(fileName.c_str());

    Array cumu0(p0.size(), 0.0);
    Array cumu(p.size(), 0.0);
    Array cumuref(p.size(), 0.0);

    for (Size i = 0; i < p.size(); ++i) {
        cumu0[i] = (i == 0 ? p0[0] : p0[i] + cumu0[i - 1]);
        cumu[i] = (i == 0 ? p[0] : p[i] + cumu[i - 1]);
        cumuref[i] = (i == 0 ? pref[0] : pref[i] + cumuref[i - 1]);
        if (file.is_open())
            file << i << " " << std::scientific << A0[i] << " " << p0[i] << " " << cumu0[i] << " " << A[i] << " "
                 << p[i] << " " << cumu[i] << " " << A[i] << " " << pref[i] << " " << cumuref[i] << std::endl;
    }
    if (file.is_open()) {
        file << "# pd0: " << pd0 << std::endl
             << "# losses: " << l[0] << " " << l[1] << " " << l[2] << std::endl
             << "# attachment point: " << attachmentRatio << std::endl
             << "# detachment point: " << detachmentRatio << std::endl
             << "# correlation: " << rho << std::endl
             << "# Expected basket loss, marginal: " << el0 << std::endl
             << "# Expected basket loss, correlated: " << el << std::endl
             << "# Expected tranche loss, marginal: " << expectedTrancheLoss(a, d, p0, cumu0, A0) << std::endl
             << "# Expected tranche loss, correlated (full): " << calculatedLossTrancheHWFullBucketing << std::endl
             << "# Expected tranche loss, correlated: " << calculatedLossTrancheHWTrancheBucketing << std::endl
             << "# Expected tranche loss, correlated, ref: " << trancheLossMC << std::endl;
        file.close();
    }

    BOOST_TEST_MESSAGE("pd: " << pd0);
    BOOST_TEST_MESSAGE("losses: " << l[0] << " " << l[1] << " " << l[2]);
    BOOST_TEST_MESSAGE("attachment point: " << attachmentRatio);
    BOOST_TEST_MESSAGE("detachment point: " << detachmentRatio);
    BOOST_TEST_MESSAGE("correlation: " << rho);
    BOOST_TEST_MESSAGE("Expected basket loss, marginal: " << el0);
    BOOST_TEST_MESSAGE("Expected basket loss, correlated: " << el);
    BOOST_TEST_MESSAGE("Expected tranche loss, correlated (full): " << calculatedLossTrancheHWFullBucketing);
    BOOST_TEST_MESSAGE("Expected tranche loss, correlated: " << calculatedLossTrancheHWTrancheBucketing);
    BOOST_TEST_MESSAGE("Expected tranche loss, correlated, ref: " << trancheLossMC);
    BOOST_CHECK_CLOSE(sum0, 1.0, 1e-4);
    BOOST_CHECK_CLOSE(sum, 1.0, 1e-4);
    BOOST_CHECK_CLOSE(el0, el, 1.0);
}

BOOST_AUTO_TEST_CASE(testHullWhiteBucketingMultiStateExpectedTrancheLoss) {

    run_case({0.6, 0.6, 0.6}, "data_1.txt", 0.03);
    run_case({0.3, 0.6, 0.9}, "data_2.txt", 0.07); // 40% recovery CDX/iTraxx IG
    run_case({0.3, 0.6, 0.9}, "data_3.txt", 0.15); // 40% recovery CDX/iTraxx IG
    run_case({0.5, 0.7, 0.9}, "data_5.txt", 0.35); // 30% recovery CDX HY
    // run_case({0.76, 0.6, 0.2}, "data_4.txt"); // asymmetric
}

BOOST_AUTO_TEST_CASE(testHullWhiteBucketingNonEqualPDs) {
    BOOST_TEST_MESSAGE("Testing Hull White Bucketing with different PDs...");

    Size N = 5; // buckets

    double lowerlimit = 0;
    double upperlimit = 5;

    std::vector<Real> pds{0.1, 0.1, 0.05, 0.1, 0.05};
    std::vector<Real> losses{1.0, 1.0, 1.0, 1.0, 1.0};

    HullWhiteBucketing hw(lowerlimit, upperlimit, N);
    hw.compute(pds.begin(), pds.end(), losses.begin());

    auto p = hw.probability();
    auto A = hw.averageLoss();
    std::vector<double> ref = {0., 0.6579225000000, 0.2885625000000, 0.0492750000000, 0.0040750000000,
                               0.0001625000000, 0.0000025000000};
    std::vector<double> refA = {0.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0};
    for (Size i = 0; i < hw.buckets(); ++i) {
        BOOST_TEST_MESSAGE("Bucket " << (i == 0 ? QL_MIN_REAL : hw.upperBucketBound()[i - 1]) << " ..."
                                     << hw.upperBucketBound()[i] << ": p = " << p[i] << " A = " << A[i]
                                     << " ref = " << ref[i]);
        BOOST_CHECK_CLOSE(p[i], ref[i], 0.01);
        BOOST_CHECK_CLOSE(A[i], refA[i], 0.01);
    }
}

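// The two inhomogeneous-portfolio tests below compare the bucketed expected
// tranche loss (attachment 0, detachment at the bucketing upper limit) against
// the exact distribution obtained by full enumeration, for the single-state and
// the multi-state algorithm respectively.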
BOOST_AUTO_TEST_CASE(testHullWhiteBucketingSingleStateExpectedLossNonHomogenous) {
    using namespace test_hullwhitebucketing;
    BOOST_TEST_MESSAGE("Testing Single State Hull White Bucketing, Inhomogeneous Portfolio");
    Size N = 20; // buckets
    double lowerlimit = 0;
    std::vector<Real> pds{0.0125, 0.0093, 0.0106, 0.0095, 0.0077, 0.0104, 0.0075, 0.0117, 0.0078, 0.009,
                          0.0092, 0.0088, 0.0107, 0.0085, 0.0089, 0.0115, 0.0092, 0.0093, 0.012,  0.0102};
    std::vector<Real> lgds{0.45, 0.41, 0.35, 0.39, 0.39, 0.35, 0.42, 0.39, 0.45, 0.37,
                           0.4,  0.39, 0.42, 0.37, 0.36, 0.44, 0.44, 0.42, 0.38, 0.42};
    size_t nObligors = pds.size();
    BOOST_TEST_MESSAGE("number of obligors " << nObligors);
    BOOST_TEST_MESSAGE("number of buckets " << N);
    std::map<double, double> exactDistribution = lossDistribution(pds, lgds);
    for (auto detachmentPoint : {0.03, 0.07, 0.15, 0.35}) {
        double upperlimit = static_cast<double>(nObligors) * detachmentPoint;
        BOOST_TEST_MESSAGE("detachment point " << detachmentPoint);
        BOOST_TEST_MESSAGE("upper limit " << upperlimit);
        HullWhiteBucketing hw(lowerlimit, upperlimit, N);
        double expectedLoss = test_hullwhitebucketing::expectedTrancheLoss(exactDistribution, upperlimit);
        hw.compute(pds.begin(), pds.end(), lgds.begin());
        BucketedDistribution hwBucketing(hw);
        double calculatedLoss = test_hullwhitebucketing::expectedTrancheLoss(hwBucketing.p, hwBucketing.A, upperlimit);
        BOOST_TEST_MESSAGE("Expected loss " << expectedLoss << " and calculated loss " << calculatedLoss);
        BOOST_CHECK_CLOSE(expectedLoss, calculatedLoss, 1e-4);
    }
}

BOOST_AUTO_TEST_CASE(testHullWhiteBucketingMultiStateExpectedLossNonHomogenous) {

    using namespace test_hullwhitebucketing;

    BOOST_TEST_MESSAGE("Testing Multi State Hull White Bucketing, Inhomogeneous Portfolio");
    Size N = 20; // buckets
    double lowerlimit = 0.0;
    std::vector<std::vector<Real>> pds{
        {0.0238, 0.0079}, {0.0223, 0.0074}, {0.0293, 0.0098}, {0.0106, 0.0035}, {0.012, 0.004},
        {0.0175, 0.0058}, {0.0129, 0.0043}, {0.0091, 0.003},  {0.014, 0.0047},  {0.0138, 0.0046},
        {0.023, 0.0077},  {0.0299, 0.01},   {0.0183, 0.0061}, {0.0291, 0.0097}};
    std::vector<std::vector<Real>> lgds{
        {0.44, 0.48}, {0.34, 0.37}, {0.46, 0.51}, {0.47, 0.52}, {0.3, 0.33},  {0.42, 0.46}, {0.33, 0.36},
        {0.3, 0.33},  {0.3, 0.33},  {0.42, 0.46}, {0.38, 0.42}, {0.4, 0.44},  {0.38, 0.42}, {0.44, 0.48}};
    size_t nObligors = pds.size();
    BOOST_TEST_MESSAGE("number of obligors " << nObligors);
    BOOST_TEST_MESSAGE("number of buckets " << N);
    std::map<double, double> exactDistribution = lossDistribution(pds, lgds);
    for (auto detachmentPoint : {0.03, 0.07, 0.15, 0.35}) {
        double upperlimit = static_cast<double>(nObligors) * detachmentPoint;
        BOOST_TEST_MESSAGE("detachment point " << detachmentPoint);
        BOOST_TEST_MESSAGE("upper limit " << upperlimit);
        HullWhiteBucketing hw(lowerlimit, upperlimit, N);
        double expectedLoss = test_hullwhitebucketing::expectedTrancheLoss(exactDistribution, upperlimit);
        hw.computeMultiState(pds.begin(), pds.end(), lgds.begin());
        BucketedDistribution hwBucketing(hw);
        double calculatedLoss = test_hullwhitebucketing::expectedTrancheLoss(hwBucketing.p, hwBucketing.A, upperlimit);
        BOOST_TEST_MESSAGE("Expected loss " << expectedLoss << " and calculated loss " << calculatedLoss);
        BOOST_CHECK_CLOSE(expectedLoss, calculatedLoss, 1e-4);
    }
}

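// Index semantics: bucket 0 is (-inf, min), bucket i >= 1 covers the left-closed
// interval [bound[i-1], bound[i]), and values at or beyond the last bound map to
// the final overflow bucket, as the test cases below document.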
BOOST_AUTO_TEST_CASE(testBucketingIndex) {
    Size N = 5; // buckets
    double upperlimit = 5;
    BOOST_TEST_MESSAGE("Testing uniform bucket indexing");
    HullWhiteBucketing hw(0.0, upperlimit, N);
    std::map<double, size_t> testCases1{{-1, 0}, {0., 1}, {0.99, 1}, {1.0, 2}, {1.75, 2}, {2.0, 3}};

    for (const auto& [value, expectedIndex] : testCases1) {
        BOOST_CHECK_EQUAL(expectedIndex, hw.index(value));
    }

    // non-uniform buckets
    BOOST_TEST_MESSAGE("Testing non-uniform bucket indexing");
    std::vector<Real> buckets{0., 0.25, 0.3, 0.5, 1};

    HullWhiteBucketing hw2(buckets.begin(), buckets.end());

    std::map<double, size_t> testCases2{{-0.01, 0}, {0., 1}, {0.125, 1}, {0.25, 2}, {0.275, 2}, {0.3, 3}, {1.1, 5}};

    for (const auto& [value, expectedIndex] : testCases2) {
        BOOST_CHECK_EQUAL(expectedIndex, hw2.index(value));
    }
}

BOOST_AUTO_TEST_SUITE_END()

BOOST_AUTO_TEST_SUITE_END()