# root/trunk/tests.d

Revision 289, 140.2 kB (checked in by dsimcha, 5 years ago) |
---|

Line | |
---|---|

1 | /**Hypothesis testing beyond simple CDFs. All functions work with input |

2 | * ranges with elements implicitly convertible to double unless otherwise noted. |

3 | * |

4 | * Author: David Simcha*/ |

5 | /* |

6 | * License: |

7 | * Boost Software License - Version 1.0 - August 17th, 2003 |

8 | * |

9 | * Permission is hereby granted, free of charge, to any person or organization |

10 | * obtaining a copy of the software and accompanying documentation covered by |

11 | * this license (the "Software") to use, reproduce, display, distribute, |

12 | * execute, and transmit the Software, and to prepare derivative works of the |

13 | * Software, and to permit third-parties to whom the Software is furnished to |

14 | * do so, all subject to the following: |

15 | * |

16 | * The copyright notices in the Software and this entire statement, including |

17 | * the above license grant, this restriction and the following disclaimer, |

18 | * must be included in all copies of the Software, in whole or in part, and |

19 | * all derivative works of the Software, unless such copies or derivative |

20 | * works are solely in the form of machine-executable object code generated by |

21 | * a source language processor. |

22 | * |

23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |

24 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |

25 | * FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT |

26 | * SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE |

27 | * FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, |

28 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |

29 | * DEALINGS IN THE SOFTWARE. |

30 | */ |

31 | module dstats.tests; |

32 | |

33 | import std.algorithm, std.functional, std.range, std.conv, std.math, std.traits, |

34 | std.exception, std.typetuple; |

35 | |

36 | import dstats.base, dstats.distrib, dstats.alloc, dstats.summary, dstats.sort, |

37 | dstats.cor; |

38 | |

39 | private static import dstats.infotheory; |

40 | |

version(unittest) {
    import std.stdio, dstats.random;

    // Empty entry point so this module can be compiled standalone with
    // -unittest to run the tests below.
    void main(){}
}

45 | |

/**Alternative hypotheses. Exact meaning varies with test used.
 *
 * X denotes the hypothesized value (e.g. testMean) and f(input1) the
 * statistic being tested (e.g. the sample mean).
 */
enum Alt {
    /// f(input1) != X
    twoSided,

    /// f(input1) < X
    less,

    /// f(input1) > X
    greater,

    /**Skip P-value computation (and confidence intervals if applicable)
     * and just return the test statistic.*/
    none,

    // These are kept for compatibility with the old style, are intentionally
    // lacking DDoc and may eventually be deprecated/removed.
    TWOSIDE = twoSided,
    LESS = less,
    GREATER = greater,
    NONE = none
}

68 | |

/**A plain old data struct holding the outcome of a hypothesis test:
 * the test statistic and the P-value. Implicitly converts to the
 * P-value via alias this, so a TestRes can be used anywhere a plain
 * P-value is expected.*/
struct TestRes {
    /// The test statistic. What exactly this is is specific to the test.
    double testStat;

    /**The P-value against the provided alternative. This struct can
     * be implicitly converted to just the P-value via alias this.*/
    double p;

    /// Allow implicit conversion to the P-value.
    alias p this;

    ///
    string toString() {
        immutable statPart = text("Test Statistic = ", testStat);
        immutable pPart = text("\nP = ", p);
        return statPart ~ pPart;
    }
}

86 | |

/**A plain old data struct for returning the results of hypothesis tests
 * that also produce confidence intervals. Contains, and can implicitly
 * convert to, a TestRes, so it can be used anywhere a TestRes (or a bare
 * P-value) is expected.*/
struct ConfInt {
    ///
    TestRes testRes;

    /// Lower bound of the confidence interval at the level specified.
    double lowerBound;

    /// Upper bound of the confidence interval at the level specified.
    double upperBound;

    alias testRes this;

    ///
    string toString() {
        immutable head = text("Test Statistic = ", testRes.testStat,
            "\nP = ", testRes.p);
        immutable tail = text("\nLower Confidence Bound = ", lowerBound,
            "\nUpper Confidence Bound = ", upperBound);
        return head ~ tail;
    }
}

109 | |

/**Tests whether a struct/class has the necessary information for calculating
 * a T-test. It must have a property .mean (mean), .stdev (standard deviation),
 * .var (variance), and .N (sample size).
 */
template isSummary(T) {
    // Duck-typed check: each property only needs to exist and be readable
    // on T.init; no specific base type is required.
    enum bool isSummary = is(typeof(T.init.mean)) && is(typeof(T.init.stdev)) &&
        is(typeof(T.init.var)) && is(typeof(T.init.N));
}

118 | |

/**One-sample Student's T-test for difference between mean of data and
 * a fixed value. Alternatives are Alt.less, meaning mean(data) < testMean,
 * Alt.greater, meaning mean(data) > testMean, and Alt.twoSided, meaning
 * mean(data) != testMean.
 *
 * data may be either an iterable with elements implicitly convertible to
 * double or a summary struct (see isSummary).
 *
 * Examples:
 * ---
 * uint[] data = [1,2,3,4,5];
 *
 * // Test the null hypothesis that the mean of data is >= 1 against the
 * // alternative that the mean of data is < 1. Calculate confidence
 * // intervals at 90%.
 * auto result1 = studentsTTest(data, 1, Alt.less, 0.9);
 *
 * // Do the same thing, only this time we've already calculated the summary
 * // statistics explicitly before passing them to studentsTTest.
 * auto summary = meanStdev(data);
 * writeln(summary.stdev);
 * result2 = studentsTTest(summary, 1, Alt.less, 0.9); // Same as result1.
 * assert(result1 == result2);
 * ---
 *
 * Returns: A ConfInt containing T, the P-value and the boundaries of
 * the confidence interval for mean(T) at the level specified.
 *
 * References: http://en.wikipedia.org/wiki/Student%27s_t-test
 */
ConfInt studentsTTest(T)(T data, double testMean = 0, Alt alt = Alt.twoSided,
    double confLevel = 0.95)
if( (isSummary!T || doubleIterable!T)) {
    enforceConfidence(confLevel);
    dstatsEnforce(isFinite(testMean), "testMean must not be infinite or nan.");

    // A one-sample T-test is the paired T-test applied to the summary of
    // the data itself, so summarize if needed and delegate.
    static if(isSummary!T) {
        alias data summ;
    } else {
        auto summ = meanStdev(data);
    }

    return pairedTTest(summ, testMean, alt, confLevel);
}

161 | |

unittest {
    // Expected values computed with R's t.test().
    auto t1 = studentsTTest([1, 2, 3, 4, 5].dup, 2);
    assert(approxEqual(t1.testStat, 1.4142));
    assert(approxEqual(t1.p, 0.2302));
    assert(approxEqual(t1.lowerBound, 1.036757));
    assert(approxEqual(t1.upperBound, 4.963243));
    // Passing a precomputed summary must yield identical results.
    assert(t1 == studentsTTest( meanStdev([1,2,3,4,5].dup), 2));

    auto t2 = studentsTTest([1, 2, 3, 4, 5].dup, 2, Alt.less);
    assert(approxEqual(t2.p, .8849));
    assert(approxEqual(t2.testStat, 1.4142));
    // One-sided intervals are unbounded on the untested side.
    assert(t2.lowerBound == -double.infinity);
    assert(approxEqual(t2.upperBound, 4.507443));

    auto t3 = studentsTTest( summary([1, 2, 3, 4, 5].dup), 2, Alt.greater);
    assert(approxEqual(t3.p, .1151));
    assert(approxEqual(t3.testStat, 1.4142));
    assert(approxEqual(t3.lowerBound, 1.492557));
    assert(t3.upperBound == double.infinity);
}

182 | |

/**Two-sample T test for a difference in means,
 * assumes variances of samples are equal. Alternatives are Alt.less, meaning
 * mean(sample1) - mean(sample2) < testMean, Alt.greater, meaning
 * mean(sample1) - mean(sample2) > testMean, and Alt.twoSided, meaning
 * mean(sample1) - mean(sample2) != testMean.
 *
 * sample1 and sample2 may be either iterables with elements implicitly
 * convertible to double or summary structs (see isSummary).
 *
 * Returns: A ConfInt containing the T statistic, the P-value, and the
 * boundaries of the confidence interval for the difference between means
 * of sample1 and sample2 at the specified level.
 *
 * References: http://en.wikipedia.org/wiki/Student%27s_t-test
 */
ConfInt studentsTTest(T, U)(T sample1, U sample2, double testMean = 0,
    Alt alt = Alt.twoSided, double confLevel = 0.95)
if( (doubleIterable!T || isSummary!T) && (doubleIterable!U || isSummary!U)) {
    enforceConfidence(confLevel);
    dstatsEnforce(isFinite(testMean), "testMean must not be infinite or nan.");

    // Summarize each sample unless the caller already passed summary stats.
    static if(isSummary!T) {
        alias sample1 s1summ;
    } else {
        immutable s1summ = meanStdev(sample1);
    }

    static if(isSummary!U) {
        alias sample2 s2summ;
    } else {
        immutable s2summ = meanStdev(sample2);
    }

    immutable n1 = s1summ.N, n2 = s2summ.N;

    // Pooled standard deviation: N * mse is each sample's sum of squared
    // deviations, giving the usual pooled estimate with n1 + n2 - 2
    // degrees of freedom.
    immutable sx1x2 = sqrt((n1 * s1summ.mse + n2 * s2summ.mse) /
        (n1 + n2 - 2));
    // Standard error of the difference between the sample means.
    immutable normSd = (sx1x2 * sqrt((1.0 / n1) + (1.0 / n2)));
    immutable meanDiff = s1summ.mean - s2summ.mean;
    ConfInt ret;
    ret.testStat = (meanDiff - testMean) / normSd;
    if(alt == Alt.none) {
        return ret;
    } else if(alt == Alt.less) {
        ret.p = studentsTCDF(ret.testStat, n1 + n2 - 2);

        // One-sided interval: unbounded below.
        ret.lowerBound = -double.infinity;
        if(confLevel > 0) {
            immutable delta = invStudentsTCDF(1 - confLevel, n1 + n2 - 2)
                * normSd;
            ret.upperBound = meanDiff - delta;
        } else {
            // Degenerate zero-width "interval" when confLevel == 0.
            ret.upperBound = meanDiff;
        }

    } else if(alt == Alt.greater) {
        ret.p = studentsTCDF(-ret.testStat, n1 + n2 - 2);

        // One-sided interval: unbounded above.
        ret.upperBound = double.infinity;
        if(confLevel > 0) {
            immutable delta = invStudentsTCDF(1 - confLevel, n1 + n2 - 2)
                * normSd;
            ret.lowerBound = meanDiff + delta;
        } else {
            ret.lowerBound = meanDiff;
        }

    } else {
        // Two-sided: double the one-tailed P in the direction of the
        // observed statistic.
        immutable t = ret.testStat;
        ret.p = 2 * ((t < 0) ?
            studentsTCDF(t, n1 + n2 - 2) :
            studentsTCDFR(t, n1 + n2 - 2));

        if(confLevel > 0) {
            // delta is negative here (lower-tail quantile), so adding it
            // yields the lower bound and subtracting the upper.
            immutable delta = invStudentsTCDF(
                0.5 * (1 - confLevel), n1 + n2 - 2) * normSd;
            ret.lowerBound = meanDiff + delta;
            ret.upperBound = meanDiff - delta;
        } else {
            ret.lowerBound = meanDiff;
            ret.upperBound = meanDiff;
        }
    }
    return ret;
}

268 | |

unittest {
    // Expected values computed with R's t.test(var.equal = TRUE).
    auto t1 = studentsTTest([1,2,3,4,5].dup, [1,3,4,5,7,9].dup);
    assert(approxEqual(t1.p, 0.2346));
    assert(approxEqual(t1.testStat, -1.274));
    assert(approxEqual(t1.lowerBound, -5.088787));
    assert(approxEqual(t1.upperBound, 1.422120));


    assert(approxEqual(studentsTTest([1,2,3,4,5].dup, [1,3,4,5,7,9].dup, 0, Alt.less),
        0.1173));
    assert(approxEqual(studentsTTest([1,2,3,4,5].dup, [1,3,4,5,7,9].dup, 0, Alt.greater),
        0.8827));
    auto t2 = studentsTTest([1,3,5,7,9,11].dup, [2,2,1,3,4].dup, 5);
    assert(approxEqual(t2.p, 0.44444));
    assert(approxEqual(t2.testStat, -0.7998));
    assert(approxEqual(t2.lowerBound, -0.3595529));
    assert(approxEqual(t2.upperBound, 7.5595529));


    auto t5 = studentsTTest([1,3,5,7,9,11].dup, [2,2,1,3,4].dup, 0, Alt.less);
    assert(approxEqual(t5.p, 0.965));
    assert(approxEqual(t5.testStat, 2.0567));
    assert(approxEqual(t5.upperBound, 6.80857));
    assert(t5.lowerBound == -double.infinity);

    auto t6 = studentsTTest([1,3,5,7,9,11].dup, [2,2,1,3,4].dup, 0, Alt.greater);
    assert(approxEqual(t6.p, 0.03492));
    assert(approxEqual(t6.testStat, 2.0567));
    assert(approxEqual(t6.lowerBound, 0.391422));
    assert(t6.upperBound == double.infinity);

    // Minimal case: second sample has a single element.
    auto t7 = studentsTTest([1, 2, 4], [3]);
    assert(approxEqual(t7.p, 0.7418));
    // Fix: this was written as the confusing literal `0.-.378`, which
    // parses as `0. - .378`; spell the intended value -0.378 directly.
    assert(approxEqual(t7.testStat, -0.378));
    assert(approxEqual(t7.lowerBound, -8.255833));
    assert(approxEqual(t7.upperBound, 6.922499));

}

308 | |

/**Two-sample T-test for difference in means. Does NOT assume variances are equal.
 * Alternatives are Alt.less, meaning mean(sample1) - mean(sample2) < testMean,
 * Alt.greater, meaning mean(sample1) - mean(sample2) > testMean, and
 * Alt.twoSided, meaning mean(sample1) - mean(sample2) != testMean.
 *
 * sample1 and sample2 may be either iterables with elements implicitly
 * convertible to double or summary structs (see isSummary).
 *
 * Returns: A ConfInt containing the T statistic, the P-value, and the
 * boundaries of the confidence interval for the difference between means
 * of sample1 and sample2 at the specified level.
 *
 * References: http://en.wikipedia.org/wiki/Student%27s_t-test
 */
ConfInt welchTTest(T, U)(T sample1, U sample2, double testMean = 0,
    Alt alt = Alt.twoSided, double confLevel = 0.95)
if( (isSummary!T || doubleIterable!T) && (isSummary!U || doubleIterable!U)) {
    enforceConfidence(confLevel);
    dstatsEnforce(isFinite(testMean), "testMean cannot be infinite or nan.");

    // Summarize each sample unless summary statistics were passed directly.
    static if(isSummary!T) {
        alias sample1 s1summ;
    } else {
        auto s1summ = meanStdev(sample1);
    }

    static if(isSummary!U) {
        alias sample2 s2summ;
    } else {
        auto s2summ = meanStdev(sample2);
    }

    immutable double n1 = s1summ.N,
        n2 = s2summ.N;

    immutable v1 = s1summ.var, v2 = s2summ.var;
    // Standard error of the difference, without pooling variances.
    immutable double sx1x2 = sqrt(v1 / n1 + v2 / n2);
    // NOTE: meanDiff already has testMean subtracted; the confidence
    // bounds below add testMean back to recover the raw difference.
    immutable double meanDiff = s1summ.mean - s2summ.mean - testMean;
    immutable double t = meanDiff / sx1x2;
    // Welch-Satterthwaite approximation of the degrees of freedom.
    double numerator = v1 / n1 + v2 / n2;
    numerator *= numerator;
    double denom1 = v1 / n1;
    denom1 = denom1 * denom1 / (n1 - 1);
    double denom2 = v2 / n2;
    denom2 = denom2 * denom2 / (n2 - 1);
    immutable double df = numerator / (denom1 + denom2);

    ConfInt ret;
    ret.testStat = t;
    if(alt == Alt.none) {
        return ret;
    } else if(alt == Alt.less) {
        ret.p = studentsTCDF(t, df);
        // One-sided interval: unbounded below.
        ret.lowerBound = -double.infinity;

        if(confLevel > 0) {
            ret.upperBound = meanDiff +
                testMean - invStudentsTCDF(1 - confLevel, df) * sx1x2;
        } else {
            // Degenerate zero-width "interval" when confLevel == 0.
            ret.upperBound = meanDiff + testMean;
        }

    } else if(alt == Alt.greater) {
        ret.p = studentsTCDF(-t, df);
        // One-sided interval: unbounded above.
        ret.upperBound = double.infinity;

        if(confLevel > 0) {
            ret.lowerBound = meanDiff +
                testMean + invStudentsTCDF(1 - confLevel, df) * sx1x2;
        } else {
            ret.lowerBound = meanDiff + testMean;
        }

    } else {
        // Two-sided: double the one-tailed P in the direction of t.
        ret.p = 2 * ((t < 0) ?
            studentsTCDF(t, df) :
            studentsTCDF(-t, df));

        if(confLevel > 0) {
            // delta is negative (lower-tail quantile), so subtracting it
            // raises the upper bound and adding it lowers the lower bound.
            double delta = invStudentsTCDF(0.5 * (1 - confLevel), df) * sx1x2;
            ret.upperBound = meanDiff + testMean - delta;
            ret.lowerBound = meanDiff + testMean + delta;
        } else {
            ret.upperBound = meanDiff + testMean;
            ret.lowerBound = meanDiff + testMean;
        }
    }
    return ret;
}

398 | |

unittest {
    // Expected values computed with R's t.test() (Welch variant).
    // Mixed summary-struct and raw-range inputs are exercised deliberately.
    auto t1 = welchTTest( meanStdev([1,2,3,4,5].dup), [1,3,4,5,7,9].dup, 2);
    assert(approxEqual(t1.p, 0.02285));
    assert(approxEqual(t1.testStat, -2.8099));
    assert(approxEqual(t1.lowerBound, -4.979316));
    assert(approxEqual(t1.upperBound, 1.312649));

    auto t2 = welchTTest([1,2,3,4,5].dup, summary([1,3,4,5,7,9].dup), -1, Alt.less);
    assert(approxEqual(t2.p, 0.2791));
    assert(approxEqual(t2.testStat, -0.6108));
    assert(t2.lowerBound == -double.infinity);
    assert(approxEqual(t2.upperBound, 0.7035534));

    auto t3 = welchTTest([1,2,3,4,5].dup, [1,3,4,5,7,9].dup, 0.5, Alt.greater);
    assert(approxEqual(t3.p, 0.9372));
    assert(approxEqual(t3.testStat, -1.7104));
    assert(approxEqual(t3.lowerBound, -4.37022));
    assert(t3.upperBound == double.infinity);

    assert(approxEqual(welchTTest([1,3,5,7,9,11].dup, [2,2,1,3,4].dup).p, 0.06616));
    assert(approxEqual(welchTTest([1,3,5,7,9,11].dup, [2,2,1,3,4].dup, 0,
        Alt.less).p, 0.967));
    assert(approxEqual(welchTTest([1,3,5,7,9,11].dup, [2,2,1,3,4].dup, 0,
        Alt.greater).p, 0.03308));
}

425 | |

/**Paired T test. Tests the hypothesis that the mean difference between
 * corresponding elements of before and after is testMean. Alternatives are
 * Alt.less, meaning that the true mean difference (before[i] - after[i])
 * is less than testMean, Alt.greater, meaning the true mean difference is
 * greater than testMean, and Alt.twoSided, meaning the true mean difference
 * is not equal to testMean.
 *
 * before and after must be input ranges with elements implicitly convertible
 * to double.
 *
 * Returns: A ConfInt containing the T statistic, the P-value, and the
 * boundaries of the confidence interval for the mean difference between
 * corresponding elements of sample1 and sample2 at the specified level.
 *
 * References: http://en.wikipedia.org/wiki/Student%27s_t-test
 */
ConfInt pairedTTest(T, U)(T before, U after, double testMean = 0,
    Alt alt = Alt.twoSided, double confLevel = 0.95)
if(doubleInput!(T) && doubleInput!(U) && isInputRange!T && isInputRange!U) {
    enforceConfidence(confLevel);
    dstatsEnforce(isFinite(testMean), "testMean cannot be infinite or nan.");

    // Accumulate summary statistics of the element-wise differences,
    // stopping as soon as either range runs out.
    MeanSD diffSummary;
    for(; !before.empty && !after.empty; before.popFront(), after.popFront()) {
        immutable diff = cast(double) before.front - cast(double) after.front;
        diffSummary.put(diff);
    }

    // Delegate to the summary-statistics overload.
    return pairedTTest(diffSummary, testMean, alt, confLevel);
}

458 | |

/**Compute the test directly from summary statistics of the differences between
 * corresponding samples.
 *
 * Examples:
 * ---
 * float[] data1 = getSomeDataSet();
 * float[] data2 = getSomeOtherDataSet();
 * assert(data1.length == data2.length);
 *
 * // Calculate summary statistics on difference explicitly.
 * MeanSD summary;
 * foreach(i; 0..data1.length) {
 *     summary.put(data1[i] - data2[i]);
 * }
 *
 * // Test the null hypothesis that the mean difference between corresponding
 * // elements (data1[i] - data2[i]) is <= 5 against the alternative that it
 * // is greater than 5. Calculate confidence intervals at 99%.
 * auto result = pairedTTest(summary, 5, Alt.greater, 0.99);
 * ---
 *
 * Returns: A ConfInt with the T statistic, P-value and confidence bounds
 * for the mean difference. Returns ConfInt.init when N < 2, since the
 * variance (and therefore the test) is undefined.
 *
 * References: http://en.wikipedia.org/wiki/Student%27s_t-test
 */
ConfInt pairedTTest(T)(T diffSummary, double testMean = 0,
    Alt alt = Alt.twoSided, double confLevel = 0.95)
if(isSummary!T) {
    enforceConfidence(confLevel);
    dstatsEnforce(isFinite(testMean), "testMean cannot be infinite or nan.");

    // Variance is undefined for fewer than two observations.
    if(diffSummary.N < 2) {
        return ConfInt.init;
    }

    // Save typing.
    alias diffSummary msd;

    immutable sampleMean = msd.mean;
    immutable sampleSd = msd.stdev;
    // Standard error of the mean difference.
    immutable double normSd = sampleSd / sqrt(msd.N);

    ConfInt ret;
    // Fix: the test statistic was previously assigned twice via two
    // mathematically equivalent expressions; the redundant first
    // computation has been removed and it is now computed exactly once.
    ret.testStat = (sampleMean - testMean) / normSd;

    if(alt == Alt.none) {
        return ret;
    } else if(alt == Alt.less) {
        ret.p = studentsTCDF(ret.testStat, msd.N - 1);
        // One-sided interval: unbounded below.
        ret.lowerBound = -double.infinity;

        if(confLevel > 0) {
            double delta = invStudentsTCDF(1 - confLevel, msd.N - 1) * normSd;
            ret.upperBound = sampleMean - delta;
        } else {
            // Degenerate zero-width "interval" when confLevel == 0.
            ret.upperBound = sampleMean;
        }

    } else if(alt == Alt.greater) {
        ret.p = studentsTCDF(-ret.testStat, msd.N - 1);
        // One-sided interval: unbounded above.
        ret.upperBound = double.infinity;

        if(confLevel > 0) {
            double delta = invStudentsTCDF(1 - confLevel, msd.N - 1) * normSd;
            ret.lowerBound = sampleMean + delta;
        } else {
            ret.lowerBound = sampleMean;
        }

    } else {
        // Two-sided: double the one-tailed P in the direction of t.
        immutable double t = ret.testStat;
        ret.p = 2 * ((t < 0) ?
            studentsTCDF(t, msd.N - 1) :
            studentsTCDF(-t, msd.N - 1));

        if(confLevel > 0) {
            // delta is negative (lower-tail quantile).
            double delta = invStudentsTCDF(0.5 * (1 - confLevel), msd.N - 1) * normSd;
            ret.lowerBound = sampleMean + delta;
            ret.upperBound = sampleMean - delta;
        } else {
            ret.lowerBound = ret.upperBound = sampleMean;
        }

    }
    return ret;
}

543 | |

unittest {
    // Expected values computed with R's t.test(paired = TRUE).
    auto t1 = pairedTTest([3,2,3,4,5].dup, [2,3,5,5,6].dup, 1);
    assert(approxEqual(t1.p, 0.02131));
    assert(approxEqual(t1.testStat, -3.6742));
    assert(approxEqual(t1.lowerBound, -2.1601748));
    assert(approxEqual(t1.upperBound, 0.561748));

    // All three alternatives, at two different hypothesized mean differences.
    assert(approxEqual(pairedTTest([3,2,3,4,5].dup, [2,3,5,5,6].dup, 0, Alt.less).p, 0.0889));
    assert(approxEqual(pairedTTest([3,2,3,4,5].dup, [2,3,5,5,6].dup, 0, Alt.greater).p, 0.9111));
    assert(approxEqual(pairedTTest([3,2,3,4,5].dup, [2,3,5,5,6].dup, 0, Alt.twoSided).p, 0.1778));
    assert(approxEqual(pairedTTest([3,2,3,4,5].dup, [2,3,5,5,6].dup, 1, Alt.less).p, 0.01066));
    assert(approxEqual(pairedTTest([3,2,3,4,5].dup, [2,3,5,5,6].dup, 1, Alt.greater).p, 0.9893));
}

558 | |

/**Tests the null hypothesis that the variances of all groups are equal against
 * the alternative that heteroscedasticity exists. data must be either a
 * tuple of ranges or a range of ranges. central is an alias for the measure
 * of central tendency to be used. This can be any function that maps a
 * forward range of numeric types to a numeric type. The commonly used ones
 * are median (default) and mean (less robust). Trimmed mean is sometimes
 * useful, but is currently not implemented in dstats.summary.
 *
 * References:
 * Levene, Howard (1960). "Robust tests for equality of variances". in Ingram
 * Olkin, Harold Hotelling et al. Contributions to Probability and Statistics:
 * Essays in Honor of Harold Hotelling. Stanford University Press. pp. 278-292.
 *
 * Examples:
 * ---
 * int[] sample1 = [1,2,3,4,5];
 * int[] sample2 = [100,200,300,400,500];
 * auto result = levenesTest(sample1, sample2);
 *
 * // Clearly the variances are different between these two samples.
 * assert( approxEqual(result.testStat, 10.08));
 * assert( approxEqual(result.p, 0.01310));
 * ---
 */

TestRes levenesTest(alias central = median, T...)(T data) {
    // Levene's test is an ANOVA on absolute deviations from each group's
    // center; delegate to the shared implementation with levene = true.
    return anovaLevene!(true, false, central, T)(data);
}

587 | |

unittest {
    // Values from R's car package, which uses the median definition
    // exclusively.
    auto res1 = levenesTest([1,2,3,4,5][], [2,4,8,16,32][]);
    assert(approxEqual(res1.testStat, 3.0316));
    assert(approxEqual(res1.p, 0.1198), res1.toString());

    // Range-of-ranges input form.
    auto res2 = levenesTest([[1,2,3,4,5][], [100,200,300,400,500,600][]][]);
    assert(approxEqual(res2.testStat, 13.586));
    assert(approxEqual(res2.p, 0.005029));

    // Unequal group sizes.
    auto res3 = levenesTest([8,6,7,5,3,0,9][], [3,6,2,4,3,6][]);
    assert(approxEqual(res3.testStat, 1.1406));
    assert(approxEqual(res3.p, 0.3084));
}

603 | |

/**The F-test is a one-way ANOVA extension of the T-test to >2 groups.
 * It's useful when you have 3 or more groups with equal variance and want
 * to test whether their means are equal. Data can be input as either a
 * tuple or a range. This may contain any combination of ranges of numeric
 * types, MeanSD structs and Summary structs.
 *
 * Note: This test makes the assumption that all groups have equal variances,
 * also known as homoskedasticity. For a similar test that does not make these
 * assumptions, see welchAnova.
 *
 * Examples:
 * ---
 * uint[] thing1 = [3,1,4,1],
 *     thing2 = [5,9,2,6,5,3],
 *     thing3 = [5,8,9,7,9,3];
 * auto result = fTest(thing1, meanStdev(thing2), summary(thing3));
 * assert(approxEqual(result.testStat, 4.9968));
 * assert(approxEqual(result.p, 0.02456));
 * ---
 *
 * References: http://en.wikipedia.org/wiki/F-test
 *
 * Returns:
 *
 * A TestRes containing the F statistic and the P-value for the alternative
 * that the means of the groups are different against the null that they
 * are identical.
 */
TestRes fTest(T...)(T data) {
    // Delegate to the shared implementation: no Levene preprocessing,
    // classic (non-Welch) ANOVA; the central-tendency alias is unused.
    return anovaLevene!(false, false, "dummy", T)(data);
}

635 | |

/**Same as fTest, except that this test does not require the assumption of
 * equal variances. In exchange it's slightly less powerful.
 *
 * References:
 *
 * B.L. Welch. On the Comparison of Several Mean Values: An Alternative Approach
 * Biometrika, Vol. 38, No. 3/4 (Dec., 1951), pp. 330-336.
 */
TestRes welchAnova(T...)(T data) {
    // Delegate to the shared implementation with welch = true; the
    // central-tendency alias is unused for ANOVA.
    return anovaLevene!(false, true, "dummy", T)(data);
}

647 | |

unittest {
    // Expected values computed with R (anova / oneway.test).
    uint[] thing1 = [3,1,4,1],
        thing2 = [5,9,2,6,5,3],
        thing3 = [5,8,9,7,9,3];
    auto result = fTest(thing1, meanStdev(thing2), summary(thing3));
    assert(approxEqual(result.testStat, 4.9968));
    assert(approxEqual(result.p, 0.02456));

    auto welchRes1 = welchAnova(thing1, thing2, thing3);
    assert( approxEqual(welchRes1.testStat, 6.7813));
    assert( approxEqual(welchRes1.p, 0.01706));

    // Test array case.
    auto res2 = fTest([thing1, thing2, thing3].dup);
    assert(approxEqual(result.testStat, res2.testStat));
    assert(approxEqual(result.p, res2.p));

    thing1 = [2,7,1,8,2];
    thing2 = [8,1,8];
    thing3 = [2,8,4,5,9];
    auto res3 = fTest(thing1, thing2, thing3);
    assert(approxEqual(res3.testStat, 0.377));
    assert(approxEqual(res3.p, 0.6953));

    auto res4 = fTest([summary(thing1), summary(thing2), summary(thing3)][]);
    assert(approxEqual(res4.testStat, res3.testStat));
    // Fix: this assertion previously re-checked testStat a second time;
    // passing the same data as summaries must reproduce the P-value too.
    assert(approxEqual(res4.p, res3.p));

    auto welchRes2 = welchAnova(summary(thing1), thing2, thing3);
    assert( approxEqual(welchRes2.testStat, 0.342));
    assert( approxEqual(welchRes2.p, 0.7257));

    // With two groups the F-test reduces to the equal-variance T-test
    // (F == t^2, identical P-value).
    auto res5 = fTest([1, 2, 4], [3]);
    assert(approxEqual(res5.testStat, 0.1429));
    assert(approxEqual(res5.p, 0.7418));
}

685 | |

// Levene's Test, Welch ANOVA and F test have massive overlap at the
// implementation level but less at the conceptual level, so I've combined
// the implementations into one horribly complicated but well-encapsulated
// templated function but left the interfaces as three unrelated functions.
//
// Template parameters:
//   levene  - if true, replace each observation with its absolute deviation
//             from the group center (Levene's test); otherwise use raw data.
//   welch   - if true, use Welch's heteroscedastic ANOVA; otherwise the
//             classic equal-variance F-test.
//   central - measure of central tendency used by Levene preprocessing
//             (ignored when levene is false).
private TestRes anovaLevene(bool levene, bool welch, alias central, T...)
(T dataIn) {
    // Normalize the input: either a single range of groups or a tuple of
    // groups. Either way, `data` holds the groups and `withins` holds one
    // running summary per group.
    static if(dataIn.length == 1) {
        mixin(newFrame);
        auto data = tempdup(dataIn[0]);
        auto withins = newStack!MeanSD(data.length);
        withins[] = MeanSD.init;
    } else {
        enum len = dataIn.length;
        alias dataIn data;
        MeanSD[len] withins;
    }

    static if(levene) {
        // Pre-compute each group's center so observations can be mapped to
        // absolute deviations below.
        static if(dataIn.length == 1) {
            auto centers = newStack!double(data.length);
        } else {
            double[len] centers;
        }

        foreach(i, category; data) {
            static assert( isForwardRange!(typeof(category)) &&
                is(Unqual!(ElementType!(typeof(category))) : double),
                "Can only perform Levene's test on input ranges of elements " ~
                "implicitly convertible to doubles.");

            // The cast is to force conversions to double on alias this'd stuff
            // like the Mean struct.
            centers[i] = cast(double) central(category.save);
        }

        double preprocess(double dataPoint, size_t category) {
            return abs(dataPoint - centers[category]);
        }
    } else {
        // Plain ANOVA: use the observations as-is.
        static double preprocess(double dataPoint, size_t category) {
            return dataPoint;
        }
    }


    auto DFGroups = data.length - 1;
    ulong N = 0;

    // Accumulate the per-group summaries, accepting raw numeric ranges as
    // well as precomputed summary structs.
    foreach(category, range; data) {
        static if(isInputRange!(typeof(range)) &&
            is(Unqual!(ElementType!(typeof(range))) : double)) {
            foreach(elem; range) {
                double preprocessed = preprocess(elem, category);
                withins[category].put(preprocessed);
                N++;
            }
        } else static if(isSummary!(typeof(range))) {
            // Summary structs cannot be Levene-preprocessed, but levenesTest
            // statically rejects them via the static assert above.
            withins[category] = range.toMeanSD();
            N += roundTo!long(range.N);
        } else {
            static assert(0, "Can only perform ANOVA on input ranges of " ~
                "numeric types, MeanSD structs and Summary structs, not a " ~
                typeof(range).stringof ~ ".");
        }
    }

    static if(!welch) {
        // Classic one-way ANOVA F statistic (assumes equal variances).
        immutable ulong DFDataPoints = N - data.length;
        // Grand mean, weighted by group size.
        double mu = 0;
        foreach(summary; withins) {
            mu += summary.mean * (summary.N / N);
        }

        double totalWithin = 0;
        double totalBetween = 0;
        foreach(group; withins) {
            totalWithin += group.mse * (group.N / DFDataPoints);
            immutable diffSq = (group.mean - mu) * (group.mean - mu);
            totalBetween += diffSq * (group.N / DFGroups);
        }

        immutable F = totalBetween / totalWithin;
        if(isNaN(F)) {
            // Degenerate input (e.g. no within-group variability) -- no
            // meaningful statistic can be computed.
            return TestRes.init;
        }

        return TestRes(F, fisherCDFR(F, DFGroups, DFDataPoints));
    } else {
        // Welch's heteroscedastic one-way ANOVA (Welch, 1951).
        immutable double k = data.length;
        // Sum of per-group precision weights w_i = n_i / s_i^2.
        double sumW = 0;
        foreach(summary; withins) {
            sumW += summary.N / summary.var;
        }

        double sumFt = 0;
        foreach(summary; withins) {
            sumFt += ((1 - summary.N / summary.var / sumW) ^^ 2) / (summary.N - 1);
        }

        immutable kSqM1 = (k * k - 1.0);
        // Approximate denominator degrees of freedom.
        immutable df2 = 1.0 / (3.0 / kSqM1 * sumFt);
        immutable denom = 1 + 2 * (k - 2.0) / kSqM1 * sumFt;

        // Precision-weighted grand mean.
        double yHat = 0;
        foreach(i, summary; withins) {
            yHat += summary.mean * (summary.N / summary.var);
        }
        yHat /= sumW;

        double numerator = 0;
        foreach(i, summary; withins) {
            numerator += summary.N / summary.var * ((summary.mean - yHat) ^^ 2);
        }
        numerator /= (k - 1);

        immutable F = numerator / denom;
        if(isNaN(F)) {
            return TestRes.init;
        }

        return TestRes(F, fisherCDFR(F, DFGroups, df2));
    }
}

809 | |

810 | /**Performs a correlated sample (within-subjects) ANOVA. This is a |

811 | * generalization of the paired T-test to 3 or more treatments. This |

812 | * function accepts data as either a tuple of ranges (1 for each treatment, |

813 | * such that a given index represents the same subject in each range) or |

814 | * similarly as a range of ranges. |

815 | * |

816 | * Returns: A TestRes with the F-statistic and P-value for the null that |

* the variable being measured did not vary across treatments against the

818 | * alternative that it did. |

819 | * |

820 | * Examples: |

821 | * --- |

822 | * // Test the hypothesis that alcohol, loud music, caffeine and sleep |

823 | * // deprivation all have equivalent effects on programming ability. |

824 | * |

825 | * uint[] alcohol = [8,6,7,5,3,0,9]; |

826 | * uint[] caffeine = [3,6,2,4,3,6,8]; |

827 | * uint[] noSleep = [3,1,4,1,5,9,2]; |

828 | * uint[] loudMusic = [2,7,1,8,2,8,1]; |

829 | * // Subject 0 had ability of 8 under alcohol, 3 under caffeine, 3 under |

830 | * // no sleep, 2 under loud music. Subject 1 had ability of 6 under alcohol, |

831 | * // 6 under caffeine, 1 under no sleep, and 7 under loud music, etc. |

832 | * auto result = correlatedAnova(alcohol, caffeine, noSleep, loudMusic); |

833 | * --- |

834 | * |

* References: "Concepts and Applications of Inferential Statistics".
* Richard Lowry. Vassar College. Online version:

837 | * http://faculty.vassar.edu/lowry/webtext.html |

838 | */ |

TestRes correlatedAnova(T...)(T dataIn)
if(allSatisfy!(isInputRange, T)) {
    // Two input forms are accepted:  a single range-of-ranges, or one range
    // per treatment passed as a variadic tuple.  In the first case the
    // per-treatment summaries live on the TempAlloc stack; in the second
    // they are a fixed-size array since the count is known at compile time.
    static if(dataIn.length == 1 && isInputRange!(typeof(dataIn[0].front))) {
        mixin(newFrame);
        auto data = tempdup(dataIn[0]);
        auto withins = newStack!MeanSD(data.length);
        withins[] = MeanSD.init;
    } else {
        enum len = dataIn.length;
        alias dataIn data;
        MeanSD[len] withins;
    }
    MeanSD overallSumm;
    // NOTE(review):  despite the name, this holds 1 / nGroups, not
    // nGroups - 1.
    double nGroupNeg1 = 1.0 / data.length;

    // True once any treatment range is exhausted; the lockstep loop below
    // stops at the shortest range.
    bool someEmpty() {
        foreach(elem; data) {
            if(elem.empty) {
                return true;
            }
        }
        return false;
    }

    uint nSubjects = 0;
    double subjSum = 0;
    // Consume all ranges in lockstep:  each iteration is one subject,
    // reading that subject's value under every treatment.
    while(!someEmpty) {
        double subjSumInner = 0;
        foreach(i, elem; data) {
            auto dataPoint = elem.front;
            subjSumInner += dataPoint;
            overallSumm.put(dataPoint);
            withins[i].put(dataPoint);
            data[i].popFront;
        }
        nSubjects++;
        // Accumulate (subject total)^2 / nGroups, used below for the
        // between-subjects (individual differences) sum of squares.
        subjSum += subjSumInner * subjSumInner * nGroupNeg1;
    }
    double groupSum = 0;
    foreach(elem; withins) {
        groupSum += elem.mean * elem.N;
    }

    groupSum /= sqrt(cast(double) nSubjects * data.length);
    groupSum *= groupSum;
    // Variation attributable to consistent between-subject differences;
    // a within-subjects design removes this from the error term.
    immutable subjErr = subjSum - groupSum;

    // Between-treatments mean square, scaled by group sizes.
    double betweenDev = 0;
    immutable mu = overallSumm.mean;
    foreach(group; withins) {
        double diff = (group.mean - mu);
        diff *= diff;
        betweenDev += diff * (group.N / (data.length - 1));
    }

    // Error degrees of freedom:  k*n - k - n + 1 == (k - 1)(n - 1).
    size_t errDf = data.length * nSubjects - data.length - nSubjects + 1;
    // Residual (error) mean square:  within-group variation minus the
    // subject effect.
    double randError = -subjErr / errDf;
    foreach(group; withins) {
        randError += group.mse * (group.N / errDf);
    }

    immutable F = betweenDev / randError;
    // F may be NaN (degenerate input) or negative only through round-off;
    // either way report a failed test rather than a bogus P-value.
    if(!(F >= 0)) {
        return TestRes(double.nan, double.nan);
    }

    return TestRes(F, fisherCDFR(F, data.length - 1, errDf));
}

907 | |

unittest {
    // Expected values come from the VassarStats web utility at
    // http://faculty.vassar.edu/lowry/VassarStats.html.  That tool rounds
    // its output heavily, hence the deliberately wide approxEqual
    // tolerances; still adequate to demonstrate correctness.
    uint[] alcohol = [8,6,7,5,3,0,9];
    uint[] caffeine = [3,6,2,4,3,6,8];
    uint[] noSleep = [3,1,4,1,5,9,2];
    uint[] loudMusic = [2,7,1,8,2,8,1];

    // Tuple-of-ranges input form.
    auto tupleRes = correlatedAnova(alcohol, caffeine, noSleep, loudMusic);
    assert(approxEqual(tupleRes.testStat, 0.43, 0.0, 0.01));
    assert(approxEqual(tupleRes.p, 0.734, 0.0, 0.01));

    // Range-of-ranges input form.
    uint[] groupA = [3,4,2,6];
    uint[] groupB = [4,1,9,8];
    auto arrayRes = correlatedAnova([groupA, groupB].dup);
    assert(approxEqual(arrayRes.testStat, 0.72, 0.0, 0.01));
    assert(approxEqual(arrayRes.p, 0.4584, 0.0, 0.01));
}

928 | |

929 | /**The Kruskal-Wallis rank sum test. Tests the null hypothesis that data in |

930 | * each group is not stochastically ordered with respect to data in each other |

931 | * groups. This is a one-way non-parametric ANOVA and can be thought of |

932 | * as either a generalization of the Wilcoxon rank sum test to >2 groups or |

933 | * a non-parametric equivalent to the F-test. Data can be input as either a |

934 | * tuple of ranges (one range for each group) or a range of ranges |

935 | * (one element for each group). |

936 | * |

937 | * Bugs: Asymptotic approximation of P-value only, not exact. In this case, |

938 | * I'm not sure a practical way to compute the exact P-value even exists. |

939 | * |

940 | * Returns: A TestRes with the K statistic and the P-value for the null that |

941 | * no group is stochastically larger than any other against the alternative that |

942 | * groups are stochastically ordered. |

943 | * |

* References: "Concepts and Applications of Inferential Statistics".
* Richard Lowry. Vassar College. Online version:

946 | * http://faculty.vassar.edu/lowry/webtext.html |

947 | */ |

TestRes kruskalWallis(T...)(T dataIn)
if(doubleInput!(typeof(dataIn[0].front)) || allSatisfy!(doubleInput, T)) {
    mixin(newFrame);
    size_t N = 0;

    // Accept either a single range-of-ranges or one range per group.
    // useLength is true when every group range knows its length up front,
    // letting us allocate the flattened array exactly.
    static if(dataIn.length == 1 && isInputRange!(typeof(dataIn[0].front))) {
        auto data = tempdup(dataIn[0]);
        alias ElementType!(typeof(data[0])) C;
        static if(dstats.base.hasLength!(typeof(data[0]))) {
            enum bool useLength = true;
        } else {
            enum bool useLength = false;
        }
    } else {
        enum len = dataIn.length;
        alias dataIn data;
        alias staticMap!(ElementType, T) Es;
        alias CommonType!(Es) C;
        static if(allSatisfy!(dstats.base.hasLength, T)) {
            enum bool useLength = true;
        } else {
            enum bool useLength = false;
        }
    }

    size_t[] lengths = newStack!size_t(data.length);
    static if(useLength) {
        // Lengths known:  size the flattened array exactly, then copy.
        foreach(i, rng; data) {
            auto rngLen = rng.length;
            lengths[i] = rngLen;
            N += rngLen;
        }
        auto dataArray = newStack!(Unqual!C)(N);
        size_t pos = 0;
        foreach(rng; data) {
            foreach(elem; rng) {
                dataArray[pos++] = elem;
            }
        }
    } else {
        // Lengths unknown:  append and measure each group as we go.
        auto app = appender!(Unqual!(C)[])();
        foreach(i, rng; data) {
            // BUG FIX:  the original read dataArray.length here, but
            // dataArray is not declared until after this loop.  The
            // appender's current length is what was intended.
            size_t oldLen = app.data.length;
            app.put(rng);
            lengths[i] = app.data.length - oldLen;
            N += lengths[i];
        }
        auto dataArray = app.data;
    }

    double[] ranks = newStack!double(dataArray.length);
    try {
        rankSort(dataArray, ranks);
    } catch(SortException) {
        // Unrankable data (e.g. NaNs):  no meaningful result.
        return TestRes.init;
    }

    // K = (N - 1) * SS(group mean ranks) / SS(all ranks about rBar).
    size_t index = 0;
    double denom = 0, numer = 0;
    double rBar = 0.5 * (N + 1);  // Mean of the ranks 1..N.
    foreach(meanI, l; lengths) {
        Mean groupStats;
        foreach(i; index..index + l) {
            groupStats.put(ranks[i]);
            double diff = ranks[i] - rBar;
            diff *= diff;
            denom += diff;
        }
        index += l;
        double nDiff = groupStats.mean - rBar;
        nDiff *= nDiff;
        numer += l * nDiff;
    }
    double K = (N - 1) * (numer / denom);

    // Tie correction:  divide K by 1 - sum(t^3 - t) / (N^3 - N), scanning
    // dataArray (already sorted by rankSort) for runs of equal values.
    double tieSum = 0;
    uint nTies = 1;
    foreach(i; 1..dataArray.length) {
        if(dataArray[i] == dataArray[i - 1]) {
            nTies++;
        } else if(nTies > 1) {
            double partialSum = nTies;
            partialSum = (partialSum * partialSum * partialSum) - partialSum;
            tieSum += partialSum;
            nTies = 1;
        }
    }
    // Handle a run of ties ending at the last element.
    if(nTies > 1) {
        double partialSum = nTies;
        partialSum = (partialSum * partialSum * partialSum) - partialSum;
        tieSum += partialSum;
    }
    double tieDenom = N;
    tieDenom = (tieDenom * tieDenom * tieDenom) - tieDenom;
    tieSum = 1 - (tieSum / tieDenom);
    K *= tieSum;

    if(isNaN(K)) {
        return TestRes(double.nan, double.nan);
    }

    return TestRes(K, chiSquareCDFR(K, data.length - 1));
}

1052 | |

unittest {
    // Expected values come from the VassarStats web tool at
    // http://faculty.vassar.edu/lowry/VassarStats.html .
    // R disagrees here because it does not apply a correction for ties.
    auto tupleRes = kruskalWallis([3,1,4,1].idup, [5,9,2,6].dup, [5,3,5].dup);
    assert(approxEqual(tupleRes.testStat, 4.15));
    assert(approxEqual(tupleRes.p, 0.1256));

    // The same data fed through every other supported input form must give
    // the identical answer.
    auto arrayRes = kruskalWallis([[3,1,4,1].idup, [5,9,2,6].idup, [5,3,5].idup].dup);
    assert(arrayRes == tupleRes);
    auto mapRes = kruskalWallis(map!"a"([3,1,4,1].dup), [5,9,2,6].dup, [5,3,5].dup);
    assert(mapRes == tupleRes);
    auto mapArrayRes = kruskalWallis([map!"a"([3,1,4,1].dup),
        map!"a"([5,9,2,6].dup),
        map!"a"([5,3,5].dup)].dup);
    assert(mapArrayRes == tupleRes);

    // One more data set, using just the tuple input form.
    auto fourGroups = kruskalWallis([2,7,1,8,2].dup, [8,1,8,2].dup,
        [8,4,5,9,2].dup, [7,1,8,2,8,1,8].dup);
    assert(approxEqual(fourGroups.testStat, 1.06));
    assert(approxEqual(fourGroups.p, 0.7867));
}

1078 | |

1079 | /**The Friedman test is a non-parametric within-subject ANOVA. It's useful |

1080 | * when parametric assumptions cannot be made. Usage is identical to |

1081 | * correlatedAnova(). |

1082 | * |

* References: "Concepts and Applications of Inferential Statistics".
* Richard Lowry. Vassar College. Online version:

1085 | * http://faculty.vassar.edu/lowry/webtext.html |

1086 | * |

1087 | * Bugs: No exact P-value calculation. Asymptotic approx. only. |

1088 | */ |

TestRes friedmanTest(T...)(T dataIn)
if(doubleInput!(typeof(dataIn[0].front)) || allSatisfy!(doubleInput, T)) {
    // Accept either a single range-of-ranges or one range per treatment.
    // Scratch buffers (one slot per treatment) are stack-allocated when the
    // treatment count is known at compile time.
    static if(dataIn.length == 1 && isInputRange!(typeof(dataIn[0].front))) {
        mixin(newFrame);
        auto data = tempdup(dataIn[0]);
        auto ranks = newStack!double(data.length);
        auto dataPoints = newStack!double(data.length);
        auto colMeans = newStack!Mean(data.length);
        colMeans[] = Mean.init;
    } else {
        enum len = dataIn.length;
        alias dataIn data;
        double[len] ranks;
        double[len] dataPoints;
        Mean[len] colMeans;
    }
    // (An unused rBar local from the original version has been removed.)
    MeanSD overallSumm;

    // True once any treatment range is exhausted; iteration stops at the
    // shortest range.
    bool someEmpty() {
        foreach(elem; data) {
            if(elem.empty) {
                return true;
            }
        }
        return false;
    }

    // Each loop iteration handles one subject:  rank that subject's values
    // across treatments, then accumulate the ranks per treatment column.
    uint N = 0;
    while(!someEmpty) {
        foreach(i, range; data) {
            dataPoints[i] = data[i].front;
            data[i].popFront;
        }

        try {
            rank(dataPoints[], ranks[]);
        } catch(SortException) {
            // Unrankable data (e.g. NaNs):  no meaningful result.
            return TestRes.init;
        }

        foreach(i, rank; ranks) {
            colMeans[i].put(rank);
            overallSumm.put(rank);
        }
        N++;
    }

    // Between-treatments sum of squares of mean ranks.
    double between = 0;
    double mu = overallSumm.mean;
    foreach(mean; colMeans) {
        double diff = mean.mean - mu;
        between += diff * diff;
    }
    between *= N;
    double within = overallSumm.mse * (overallSumm.N / (overallSumm.N - N));
    double chiSq = between / within;
    double df = data.length - 1;
    return TestRes(chiSq, chiSquareCDFR(chiSq, df));
}

1149 | |

unittest {
    // Expected values from R.
    uint[] alcohol = [8,6,7,5,3,0,9];
    uint[] caffeine = [3,6,2,4,3,6,8];
    uint[] noSleep = [3,1,4,1,5,9,2];
    uint[] loudMusic = [2,7,1,8,2,8,1];

    // Tuple-of-ranges input form.
    auto tupleRes = friedmanTest(alcohol, caffeine, noSleep, loudMusic);
    assert(approxEqual(tupleRes.testStat, 1.7463));
    assert(approxEqual(tupleRes.p, 0.6267));

    // Range-of-ranges input form.
    uint[] groupA = [3,4,2,6];
    uint[] groupB = [4,1,9,8];
    auto arrayRes = friedmanTest([groupA, groupB].dup);
    assert(approxEqual(arrayRes.testStat, 1));
    assert(approxEqual(arrayRes.p, 0.3173));
}

1166 | |

1167 | /**Computes Wilcoxon rank sum test statistic and P-value for |

1168 | * a set of observations against another set, using the given alternative. |

1169 | * Alt.less means that sample1 is stochastically less than sample2. |

1170 | * Alt.greater means sample1 is stochastically greater than sample2. |

1171 | * Alt.twoSided means sample1 is stochastically less than or greater than |

1172 | * sample2. |

1173 | * |

1174 | * exactThresh is the threshold value of (n1 + n2) at which this function |

1175 | * switches from exact to approximate computation of the p-value. Do not set |

1176 | * exactThresh to more than 200, as the exact |

1177 | * calculation is both very slow and not numerically stable past this point, |

1178 | * and the asymptotic calculation is very good for N this large. To disable |

1179 | * exact calculation entirely, set exactThresh to 0. |

1180 | * |

1181 | * Notes: Exact p-value computation is never used when ties are present in the |

1182 | * data, because it is not computationally feasible. |

1183 | * |

1184 | * Input ranges for this function must define a length. |

1185 | * |

1186 | * This test is also known as the Mann-Whitney U test. |

1187 | * |

1188 | * Returns: A TestRes containing the W test statistic and the P-value against |

1189 | * the given alternative. |

1190 | * |

1191 | * References: http://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U |

1192 | * |

1193 | * StackOverflow Question 376003 http://stackoverflow.com/questions/376003 |

1194 | * |

1195 | * Loughborough University MLSC Statistics 2.3 The Mann-Whitney U Test |

1196 | * http://mlsc.lboro.ac.uk/resources/statistics/Mannwhitney.pdf |

1197 | */ |

TestRes wilcoxonRankSum(T, U)
(T sample1, U sample2, Alt alt = Alt.twoSided, uint exactThresh = 50)
if(isInputRange!T && isInputRange!U &&
is(typeof(sample1.front < sample2.front) == bool) &&
is(CommonType!(ElementType!T, ElementType!U))) {

    mixin(newFrame);
    alias Unqual!(CommonType!(ElementType!(T), ElementType!(U))) C;

    // Flatten both samples into one array, sample1 occupying [0 .. n1).
    // When both ranges know their length, allocate exactly; otherwise
    // append and measure.
    static if(dstats.base.hasLength!T && dstats.base.hasLength!U) {
        auto n1 = sample1.length, n2 = sample2.length, N = n1 + n2;
        auto combined = newStack!(C)(N);
        rangeCopy(combined[0..n1], sample1);
        rangeCopy(combined[n1..$], sample2);
    } else {
        auto app = appender!(C[])();

        foreach(elem; sample1) {
            app.put(elem);
        }

        // BUG FIX:  these locals were declared uint, which fails to compile
        // on 64-bit targets where .length is a 64-bit size_t (no implicit
        // narrowing in D).  size_t is correct on all targets.
        size_t n1 = app.data.length;
        foreach(elem; sample2) {
            app.put(elem);
        }

        auto combined = app.data;
        size_t N = combined.length;
        size_t n2 = N - n1;
    }

    double[] ranks = newStack!(double)(N);
    try {
        rankSort(combined, ranks);
    } catch(SortException) {
        // Unrankable data (e.g. NaNs):  no meaningful result.
        return TestRes.init;
    }
    // W = (sum of sample1's ranks) - n1(n1 + 1)/2, the Mann-Whitney U.
    double w = reduce!("a + b")
        (0.0, ranks[0..n1]) - cast(ulong) n1 * (n1 + 1) / 2UL;

    if(alt == Alt.none) {
        return TestRes(w);
    }

    double tieSum = 0;
    // combined is sorted by rankSort.  Can use it to figure out how many
    // ties we have w/o another allocation or sorting:  accumulate
    // (t^3 - t)/12 for every run of t equal values.
    enum oneOverTwelve = 1.0 / 12.0;
    tieSum = 0;
    ulong nties = 1;
    foreach(i; 1..N) {
        if(combined[i] == combined[i - 1]) {
            nties++;
        } else {
            if(nties == 1)
                continue;
            tieSum += ((nties * nties * nties) - nties) * oneOverTwelve;
            nties = 1;
        }
    }
    // Handle a run of ties ending at the last element.
    if(nties > 1) {
        tieSum += ((nties * nties * nties) - nties) * oneOverTwelve;
    }

    immutable p = wilcoxonRankSumPval(w, n1, n2, alt, tieSum, exactThresh);
    return TestRes(w, p);
}

1266 | |

unittest {
    // Values from R.
    // Note:  several asserts below pass the TestRes returned by
    // wilcoxonRankSum directly to approxEqual; this relies on TestRes
    // implicitly converting to its P-value.

    assert(wilcoxonRankSum([1, 2, 3, 4, 5].dup, [2, 4, 6, 8, 10].dup).testStat == 5);
    assert(wilcoxonRankSum([2, 4, 6, 8, 10].dup, [1, 2, 3, 4, 5].dup).testStat == 20);
    assert(wilcoxonRankSum([3, 7, 21, 5, 9].dup, [2, 4, 6, 8, 10].dup).testStat == 15);

    // Simple stuff (no ties) first.  Testing approximate
    // calculation first (exactThresh = 0 disables the exact path).
    assert(approxEqual(wilcoxonRankSum([2,4,6,8,12].dup, [1,3,5,7,11,9].dup,
        Alt.twoSided, 0), 0.9273));
    assert(approxEqual(wilcoxonRankSum([2,4,6,8,12].dup, [1,3,5,7,11,9].dup,
        Alt.less, 0), 0.6079));
    assert(approxEqual(wilcoxonRankSum([2,4,6,8,12].dup, [1,3,5,7,11,9].dup,
        Alt.greater, 0).p, 0.4636));
    assert(approxEqual(wilcoxonRankSum([1,2,6,10,12].dup, [3,5,7,8,13,15].dup,
        Alt.twoSided, 0).p, 0.4113));
    assert(approxEqual(wilcoxonRankSum([1,2,6,10,12].dup, [3,5,7,8,13,15].dup,
        Alt.less, 0).p, 0.2057));
    assert(approxEqual(wilcoxonRankSum([1,2,6,10,12].dup,
        map!"a"([3,5,7,8,13,15].dup), Alt.greater, 0).p, 0.8423));
    assert(approxEqual(wilcoxonRankSum([1,3,5,7,9].dup, [2,4,6,8,10].dup,
        Alt.twoSided, 0), .6745));
    assert(approxEqual(wilcoxonRankSum([1,3,5,7,9].dup, [2,4,6,8,10].dup,
        Alt.less, 0), .3372));
    assert(approxEqual(wilcoxonRankSum([1,3,5,7,9].dup, [2,4,6,8,10].dup,
        Alt.greater, 0), .7346));

    // Now, lots of ties.
    assert(approxEqual(wilcoxonRankSum([1,2,3,4,5].dup, [2,3,4,5,6].dup,
        Alt.twoSided, 0), 0.3976));
    assert(approxEqual(wilcoxonRankSum([1,2,3,4,5].dup, [2,3,4,5,6].dup,
        Alt.less, 0), 0.1988));
    assert(approxEqual(wilcoxonRankSum([1,2,3,4,5].dup, [2,3,4,5,6].dup,
        Alt.greater, 0), 0.8548));
    assert(approxEqual(wilcoxonRankSum([1,2,1,1,2].dup, [1,2,3,1,1].dup,
        Alt.twoSided, 0), 0.9049));
    assert(approxEqual(wilcoxonRankSum([1,2,1,1,2].dup, [1,2,3,1,1].dup,
        Alt.less, 0), 0.4524));
    assert(approxEqual(wilcoxonRankSum([1,2,1,1,2].dup, [1,2,3,1,1].dup,
        Alt.greater, 0), 0.64));

    // Now, testing the exact calculation on the same data
    // (default exactThresh enables it for these small samples).
    assert(approxEqual(wilcoxonRankSum([2,4,6,8,12].dup, [1,3,5,7,11,9].dup,
        Alt.twoSided), 0.9307));
    assert(approxEqual(wilcoxonRankSum([2,4,6,8,12].dup, [1,3,5,7,11,9].dup,
        Alt.less), 0.6039));
    assert(approxEqual(wilcoxonRankSum([2,4,6,8,12].dup, [1,3,5,7,11,9].dup,
        Alt.greater), 0.4654));
    assert(approxEqual(wilcoxonRankSum([1,2,6,10,12].dup, [3,5,7,8,13,15].dup,
        Alt.twoSided), 0.4286));
    assert(approxEqual(wilcoxonRankSum([1,2,6,10,12].dup, [3,5,7,8,13,15].dup,
        Alt.less), 0.2143));
    assert(approxEqual(wilcoxonRankSum([1,2,6,10,12].dup, [3,5,7,8,13,15].dup,
        Alt.greater), 0.8355));
    assert(approxEqual(wilcoxonRankSum([1,3,5,7,9].dup, [2,4,6,8,10].dup,
        Alt.twoSided), .6905));
    assert(approxEqual(wilcoxonRankSum([1,3,5,7,9].dup, [2,4,6,8,10].dup,
        Alt.less), .3452));
    assert(approxEqual(wilcoxonRankSum([1,3,5,7,9].dup, [2,4,6,8,10].dup,
        Alt.greater), .7262));
}

1329 | |

private
double wilcoxonRankSumPval(double w, ulong n1, ulong n2, Alt alt = Alt.twoSided,
double tieSum = 0, uint exactThresh = 50) {
    // No alternative hypothesis requested:  there is no P-value to compute.
    if(alt == Alt.none) {
        return double.nan;
    }

    immutable double N = n1 + n2;

    // Small combined sample with no ties:  use the exact null distribution.
    if(N < exactThresh && tieSum == 0) {
        return wilcoxRSPExact(roundTo!uint(w), cast(uint) n1, cast(uint) n2, alt);
    }

    // Otherwise, normal approximation with tie correction.
    immutable sigma = sqrt(cast(double) (n1 * n2) / (N * (N - 1)) *
        ((N * N * N - N) / 12 - tieSum));

    // A zero SD can happen if all samples are tied.
    if(!(sigma > 0)) {
        return double.nan;
    }

    immutable mu = (n1 * n2) / 2.0;

    // The +/- 0.5 terms below are the continuity correction.
    switch(alt) {
        case Alt.less:
            return normalCDF(w + 0.5, mu, sigma);
        case Alt.greater:
            return normalCDFR(w - 0.5, mu, sigma);
        case Alt.twoSided:
            if(abs(w - mu) < 0.5) {
                return 1;
            }
            return (w < mu) ?
                2 * normalCDF(w + 0.5, mu, sigma) :
                2 * normalCDFR(w - 0.5, mu, sigma);
        default:
            assert(0);
    }
}

1370 | |

unittest {
    /* Values from R.  I could only get good values for Alt.less directly.
     * Using W-values to test Alt.twoSided, Alt.greater indirectly.*/
    assert(approxEqual(wilcoxonRankSumPval(1200, 50, 50, Alt.less), .3670));
    assert(approxEqual(wilcoxonRankSumPval(1500, 50, 50, Alt.less), .957903));
    assert(approxEqual(wilcoxonRankSumPval(8500, 100, 200, Alt.less), .01704));
    auto w = wilcoxonRankSum([2,4,6,8,12].dup, [1,3,5,7,11,9].dup).testStat;
    assert(approxEqual(wilcoxonRankSumPval(w, 5, 6), 0.9273));
    assert(approxEqual(wilcoxonRankSumPval(w, 5, 6, Alt.greater), 0.4636));
    assert(approxEqual(wilcoxonRankSumPval(w, 5, 6, Alt.less), 0.6079));

    // Monte carlo unit testing:  Make sure that the exact and asymptotic
    // versions agree within a small epsilon.  exactThresh = 0 forces the
    // asymptotic path; exactThresh = 50 allows the exact path for these
    // sample sizes.
    double maxEpsilon = 0;
    foreach(i; 0..1_000) {
        uint n1 = uniform(5U, 25U);
        uint n2 = uniform(5U, 25U);
        uint testStat = uniform!"[]"(0, (n1 * n2));

        foreach(alt; [Alt.less, Alt.greater, Alt.twoSided]) {
            double approxP = wilcoxonRankSumPval(testStat, n1, n2, alt, 0, 0);
            double exactP = wilcoxonRankSumPval(testStat, n1, n2, alt, 0, 50);
            double epsilon = abs(approxP - exactP);
            assert(epsilon < 0.02);
            maxEpsilon = max(maxEpsilon, epsilon);
        }
    }
}

1399 | |

1400 | /* Used internally by wilcoxonRankSum. This function uses dynamic |

1401 | * programming to count the number of combinations of numbers [1..N] that sum |

1402 | * of length n1 that sum to <= W in O(N * W * n1) time. |

1403 | * Algorithm obtained from StackOverflow Question 376003 |

1404 | * (http://stackoverflow.com/questions/376003).*/ |

private double wilcoxRSPExact(uint W, uint n1, uint n2, Alt alt = Alt.twoSided) {
    uint N = n1 + n2;
    immutable maxPossible = n1 * n2;

    // Use the symmetry of the null distribution to reduce every alternative
    // to a lower-tail sum with W in the lower half; only that case breaks
    // out of the switch and reaches the dynamic program below.
    switch(alt) {
        case Alt.less:
            if(W >= maxPossible) {  // Value impossibly large
                return 1;
            } else if(W * 2 <= maxPossible) {
                break;
            } else {
                return 1 - wilcoxRSPExact(maxPossible - W - 1, n1, n2, Alt.less);
            }
            assert(0);
        case Alt.greater:
            if(W > maxPossible) {  // Value impossibly large
                return 0;
            } else if(W * 2 >= maxPossible) {
                return wilcoxRSPExact(maxPossible - W, n1, n2, Alt.less);
            } else if(W <= 0) {
                return 1;
            } else {
                return 1 - wilcoxRSPExact(W - 1, n1, n2, Alt.less);
            }
            assert(0);
        case Alt.twoSided:
            // Two-sided P = sum of both tails, capped at 1.
            if(W * 2 <= maxPossible) {
                return min(1, wilcoxRSPExact(W, n1, n2, Alt.less) +
                    wilcoxRSPExact(maxPossible - W, n1, n2, Alt.greater));
            } else {
                return min(1, wilcoxRSPExact(W, n1, n2, Alt.greater) +
                    wilcoxRSPExact(maxPossible - W, n1, n2, Alt.less));
            }
            assert(0);
        default:
            assert(0);
    }

    // Convert the U statistic to a rank sum for the DP below.
    W += n1 * (n1 + 1) / 2UL;

    // Two (n1 + 1) x (W + 1) DP tables, rolled between iterations.
    float* cache = (newStack!(float)((n1 + 1) * (W + 1))).ptr;
    float* cachePrev = (newStack!(float)((n1 + 1) * (W + 1))).ptr;
    cache[0..(n1 + 1) * (W + 1)] = 0;
    cachePrev[0..(n1 + 1) * (W + 1)] = 0;

    /* Using doubles for the intermediate steps is too slow, but I didn't want to
     * lose too much precision.  Since my sums must be between 0 and 1, I am
     * using the entire bit space of a float to hold numbers between zero and
     * one.  This is precise to at least 1e-7.  This is good enough for a few
     * reasons:
     *
     * 1.  This is a p-value, and therefore will likely not be used in
     *     further calculations where rounding error would accumulate.
     * 2.  If this is too slow, the alternative is to use the asymptotic
     *     approximation.  This is can have relative errors of several orders
     *     of magnitude in the tails of the distribution, and is therefore
     *     clearly worse.
     * 3.  For very large N, where this function could give completely wrong
     *     answers, it would be so slow that any reasonable person would use the
     *     asymptotic approximation anyhow.*/

    // Algorithm based on StackOverflow question 376003.
    double comb = exp(-logNcomb(N, n1));
    double floatMax = cast(double) float.max;
    cache[0] = cast(float) (comb * floatMax);
    cachePrev[0] = cast(float) (comb * floatMax);

    foreach(i; 1..N + 1) {
        swap(cache, cachePrev);
        foreach(k; 1..min(i + 1, n1 + 1)) {

            // Smallest possible sum of k distinct ranks is 1 + 2 + ... + k.
            uint minW = k * (k + 1) / 2;
            float* curK = cache + k * (W + 1);
            float* prevK = cachePrev + k * (W + 1);
            float* prevKm1 = cachePrev + (k - 1) * (W + 1);

            // Either rank i is excluded (prevK[w]) or included
            // (prevKm1[w - i], only possible when i <= w).
            foreach(w; minW..W + 1) {
                curK[w] = prevK[w] + ((i <= w) ? prevKm1[w - i] : 0);
            }
        }
    }

    // Lower-tail sum over the final row (k == n1), rescaled out of
    // float-max space.
    double sum = 0;
    float* lastLine = cache + n1 * (W + 1);
    foreach(w; 1..W + 1) {
        sum += (cast(double) lastLine[w] / floatMax);
    }
    // Release the two newStack allocations made above.
    TempAlloc.free;
    TempAlloc.free;
    return sum;
}

1497 | |

unittest {
    // Values from R.
    assert(approxEqual(wilcoxRSPExact(14, 5, 6), 0.9307));
    assert(approxEqual(wilcoxRSPExact(14, 5, 6, Alt.less), 0.4654));
    assert(approxEqual(wilcoxRSPExact(14, 5, 6, Alt.greater), 0.6039));
    // Swapping the sample sizes mirrors the distribution:  the two-sided
    // P is unchanged and the one-sided tails swap.
    assert(approxEqual(wilcoxRSPExact(16, 6, 5), 0.9307));
    assert(approxEqual(wilcoxRSPExact(16, 6, 5, Alt.less), 0.6039));
    assert(approxEqual(wilcoxRSPExact(16, 6, 5, Alt.greater), 0.4654));
    assert(approxEqual(wilcoxRSPExact(66, 10, 35, Alt.less), 0.001053));
    assert(approxEqual(wilcoxRSPExact(78, 13, 6, Alt.less), 1));

    // Mostly to make sure that underflow doesn't happen until
    // the N's are truly unreasonable:
    //assert(approxEqual(wilcoxRSPExact(6_000, 120, 120, Alt.less), 0.01276508));
}

1513 | |

1514 | /**Computes a test statistic and P-value for a Wilcoxon signed rank test against |

1515 | * the given alternative. Alt.less means that elements of before are stochastically |

1516 | * less than corresponding elements of after. Alt.greater means elements of |

1517 | * before are stochastically greater than corresponding elements of after. |

1518 | * Alt.twoSided means there is a significant difference in either direction. |

1519 | * |

1520 | * exactThresh is the threshold value of before.length at which this function |

1521 | * switches from exact to approximate computation of the p-value. Do not set |

1522 | * exactThresh to more than 200, as the exact calculation is both very slow and |

1523 | * not numerically stable past this point, and the asymptotic calculation is |

1524 | * very good for N this large. To disable exact calculation entirely, set |

1525 | * exactThresh to 0. |

1526 | * |

1527 | * Notes: Exact p-value computation is never used when ties are present, |

1528 | * because it is not computationally feasible. |

1529 | * |

1530 | * The input ranges for this function must define a length and must be |

1531 | * forward ranges. |

1532 | * |

1533 | * Returns: A TestRes of the W statistic and the p-value against the given |

1534 | * alternative. |

1535 | * |

1536 | * References: http://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test |

1537 | * |

1538 | * StackOverflow Question 376003 http://stackoverflow.com/questions/376003 |

1539 | * |

1540 | * Handbook of Parametric and nonparametric statistical procedures. David Sheskin. |

1541 | * Third Edition. (2004) CRC Press. Pg. 616. |

1542 | */ |

TestRes wilcoxonSignedRank(T, U)(T before, U after, Alt alt = Alt.twoSided, uint exactThresh = 50)
if(doubleInput!(T) && doubleInput!(U) &&
is(typeof(before.front - after.front) : double)) {
    // Count of exactly-zero differences.  Zeros are excluded from the
    // effective sample size N, and their (lowest) ranks are subtracted back
    // out when accumulating W below.
    uint nZero = 0;
    byte sign(double input) {
        if(input < 0)
            return -1;
        if(input > 0)
            return 1;
        nZero++;  // Side effect: records each zero difference seen.
        return 0;
    }

    mixin(newFrame);  // Frees all newStack allocations below on scope exit.

    static if(dstats.base.hasLength!T && dstats.base.hasLength!U) {
        // Lengths known up front: buffers can be stack-allocated at their
        // exact size and a length mismatch can be diagnosed eagerly.
        dstatsEnforce(before.length == after.length,
            "Ranges must have same lengths for wilcoxonSignedRank.");

        double[] diffRanks = newStack!(double)(before.length);
        byte[] signs = newStack!(byte)(before.length);
        double[] diffs = newStack!(double)(before.length);

        size_t ii = 0;
        while(!before.empty && !after.empty) {
            double diff = cast(double) before.front - cast(double) after.front;
            signs[ii] = sign(diff);
            diffs[ii] = abs(diff);
            ii++;
            before.popFront;
            after.popFront;
        }
    } else {
        // Lengths unknown: accumulate via appenders.  No length-equality
        // check is possible here; iteration stops at the shorter range.
        double[] diffRanks;
        auto diffApp = appender!(double[])();
        auto signApp = appender!(byte[])();

        while(!before.empty && !after.empty) {
            double diff = cast(double) before.front - cast(double) after.front;
            signApp.put(sign(diff));
            diffApp.put(abs(diff));
            before.popFront;
            after.popFront;
        }

        // Note: static if does not introduce a scope, so diffs/signs declared
        // in either branch remain visible for the rest of the function.
        auto diffs = diffApp.data;
        auto signs = signApp.data;
        diffRanks = newStack!double(diffs.length);
    }
    try {
        rankSort(diffs, diffRanks);
    } catch(SortException) {
        // Unorderable data (e.g. NaNs): no meaningful result.
        return TestRes.init;
    }

    // Effective sample size excludes pairs with zero difference.
    ulong N = diffs.length - nZero;

    double W = 0;
    foreach(i, dr; diffRanks) {
        if(signs[i] == 1) {
            // diffs holds absolute values, so the nZero zeros sort first and
            // occupy the lowest ranks; subtracting nZero re-bases the ranks
            // of the non-zero differences onto 1..N.
            W += dr - nZero;
        }
    }

    // Just a sanity check.  Should be mathematically impossible for this
    // assert to fail.  The 1e-5 is for round-off error.
    assert(W > -1e-5 && W <= (N * (N + 1) / 2) + 1e-5);

    if(alt == Alt.none) {
        return TestRes(W);
    }

    // Handle ties.
    double tieSum = 0;

    // diffs is sorted by rankSort.  Can use it to figure out how many
    // ties we have w/o another allocation or sorting.
    enum denom = 1.0 / 48.0;
    ulong nties = 1;
    foreach(i; 1..diffs.length) {
        if(diffs[i] == diffs[i - 1] && diffs[i] != 0) {
            nties++;
        } else {
            if(nties == 1)
                continue;
            tieSum += ((nties * nties * nties) - nties) * denom;
            nties = 1;
        }
    }
    // Handle last run.
    if(nties > 1) {
        tieSum += ((nties * nties * nties) - nties) * denom;
    }
    if(nZero > 0 && tieSum == 0) {
        tieSum = double.nan;  // Signal that there were zeros and exact p-val can't be computed.
    }

    return TestRes(W, wilcoxonSignedRankPval(W, N, alt, tieSum, exactThresh));
}

1642 | |

unittest {
    // Reference values computed with R's wilcox.test.
    alias approxEqual ae;
    assert(wilcoxonSignedRank([1,2,3,4,5].dup, [2,1,4,5,3].dup).testStat == 7.5);
    assert(wilcoxonSignedRank([3,1,4,1,5].dup, [2,7,1,8,2].dup).testStat == 6);
    assert(wilcoxonSignedRank([8,6,7,5,3].dup, [0,9,8,6,7].dup).testStat == 5);

    // With ties, normal approx.
    assert(ae(wilcoxonSignedRank([1,2,3,4,5].dup, [2,1,4,5,3].dup), 1));
    assert(ae(wilcoxonSignedRank([3,1,4,1,5].dup, map!"a"([2,7,1,8,2].dup)), 0.7865));
    assert(ae(wilcoxonSignedRank([8,6,7,5,3].dup, [0,9,8,6,7].dup), 0.5879));
    assert(ae(wilcoxonSignedRank([1,2,3,4,5].dup, [2,1,4,5,3].dup, Alt.less), 0.5562));
    assert(ae(wilcoxonSignedRank([3,1,4,1,5].dup, [2,7,1,8,2].dup, Alt.less), 0.3932));
    assert(ae(wilcoxonSignedRank([8,6,7,5,3].dup, [0,9,8,6,7].dup, Alt.less), 0.2940));
    assert(ae(wilcoxonSignedRank([1,2,3,4,5].dup, [2,1,4,5,3].dup, Alt.greater), 0.5562));
    assert(ae(wilcoxonSignedRank([3,1,4,1,5].dup, [2,7,1,8,2].dup, Alt.greater), 0.706));
    assert(ae(wilcoxonSignedRank([8,6,7,5,3].dup, [0,9,8,6,7].dup, Alt.greater), 0.7918));
    assert(ae(wilcoxonSignedRank(cast(int[]) [1,16,2,4,8], cast(int[]) [1,5,2,3,4]).testStat, 6));
    assert(ae(wilcoxonSignedRank(cast(int[]) [1,16,2,4,8], cast(int[]) [1,5,2,3,4]), 0.1814));

    // Exact (small N, no ties, no zeros).
    assert(ae(wilcoxonSignedRank([1,2,3,4,5].dup, [2,-4,-8,16,32].dup), 0.625));
    assert(ae(wilcoxonSignedRank([1,2,3,4,5].dup, [2,-4,-8,16,32].dup, Alt.less), 0.3125));
    assert(ae(wilcoxonSignedRank([1,2,3,4,5].dup, [2,-4,-8,16,32].dup, Alt.greater), 0.7812));
    assert(ae(wilcoxonSignedRank([1,2,3,4,5].dup, [2,-4,-8,-16,32].dup), 0.8125));
    assert(ae(wilcoxonSignedRank([1,2,3,4,5].dup, [2,-4,-8,-16,32].dup, Alt.less), 0.6875));
    assert(ae(wilcoxonSignedRank([1,2,3,4,5].dup, [2,-4,-8,-16,32].dup, Alt.greater), 0.4062));

    // Monte carlo unit testing.  Make sure exact, approx are really,
    // really close to each other, for random N and test statistics.
    double maxEpsilon = 0;
    foreach(i; 0..1_000) {
        uint N = uniform(10U, 50U);
        uint testStat = uniform!"[]"(0, N * (N + 1) / 2);

        foreach(alt; [Alt.less, Alt.greater, Alt.twoSided]) {
            // exactThresh = 0 forces the normal approximation; 50 forces exact.
            double approxP = wilcoxonSignedRankPval(testStat, N, alt, 0, 0);
            double exactP = wilcoxonSignedRankPval(testStat, N, alt, 0, 50);
            double epsilon = abs(approxP - exactP);
            assert(epsilon < 0.02);
            maxEpsilon = max(maxEpsilon, epsilon);
        }
    }
}

1687 | |

/**Same as the overload, but allows testing whether a range is stochastically
 * less than or greater than a fixed value mu rather than paired elements of
 * a second range.*/
TestRes wilcoxonSignedRank(T)(T data, double mu, Alt alt = Alt.twoSided, uint exactThresh = 50)
if(doubleInput!(T) && is(typeof(data.front - mu) : double)) {
    // Reduces to the paired test against a same-length range of copies of mu.
    return wilcoxonSignedRank(data, replicate(mu, data.length), alt, exactThresh);
}

1695 | |

unittest {
    // Fixed-mu overload; values from R's one-sample wilcox.test.
    auto res = wilcoxonSignedRank([-8,-6,2,4,7].dup, 0);
    assert(approxEqual(res.testStat, 7));
    assert(approxEqual(res.p, 1));
}

1701 | |

/* Dispatches between the exact enumeration (small N, no ties or zeros) and
 * the tie-corrected, continuity-corrected normal approximation.  A NaN
 * tieSum is the caller's signal that zero differences were present, which
 * rules out the exact method but implies no tie correction.
 */
private double wilcoxonSignedRankPval(double W, ulong N, Alt alt = Alt.twoSided,
    double tieSum = 0, uint exactThresh = 50)
in {
    assert(N > 0);
    assert(tieSum >= 0 || isNaN(tieSum));
} body {
    // No alternative hypothesis requested: no p-value to report.
    if(alt == Alt.none) {
        return double.nan;
    }

    // Exact enumeration is only valid with no ties and no zeros.
    immutable exactOk = !isNaN(tieSum) && tieSum == 0;
    if(exactOk && N <= exactThresh) {
        return wilcoxSRPExact(roundTo!uint(W), to!uint(N), alt);
    }

    // Zeros present (NaN flag) means "no tie correction" for the normal
    // approximation.
    immutable tieCorrection = isNaN(tieSum) ? 0.0 : tieSum;

    // Null mean and standard deviation of W; +/- 0.5 below is the
    // continuity correction.
    immutable mu = N * (N + 1) * 0.25;
    immutable sigma = sqrt(N * (N + 1) * (2 * N + 1) / 24.0 - tieCorrection);

    switch(alt) {
    case Alt.less:
        return normalCDF(W + 0.5, mu, sigma);
    case Alt.greater:
        return normalCDFR(W - 0.5, mu, sigma);
    case Alt.twoSided:
        if(abs(W - mu) <= 0.5) {
            // W sits within the continuity-correction band of the mean.
            return 1;
        }
        return (W < mu) ? 2 * normalCDF(W + 0.5, mu, sigma)
                        : 2 * normalCDFR(W - 0.5, mu, sigma);
    default:
        assert(0);
    }
}
// Tested indirectly through other overload.

1739 | // Tested indirectly through other overload. |

1740 | |

/* Yes, a little cut and paste coding was involved here from wilcoxRSPExact,
 * but this function and wilcoxRSPExact are just different enough that
 * it would be more trouble than it's worth to write one generalized
 * function.
 *
 * Computes the exact null distribution of the signed-rank statistic by
 * dynamic programming over which ranks receive a positive sign.
 *
 * Algorithm adapted from StackOverflow question 376003
 * (http://stackoverflow.com/questions/376003).
 */
private double wilcoxSRPExact(uint W, uint N, Alt alt = Alt.twoSided) {
    // Largest possible W: all N differences positive, ranks 1..N all summed.
    immutable maxPossible = N * (N + 1) / 2;

    // Reduce every alternative to a cheap lower-tail computation, using the
    // symmetry of the null distribution about maxPossible / 2.  Only the
    // Alt.less lower-half case breaks out of the switch into the DP below.
    switch(alt) {
    case Alt.less:
        if(W >= maxPossible) { // Value impossibly large
            return 1;
        } else if(W * 2 <= maxPossible) {
            break;
        } else {
            return 1 - wilcoxSRPExact(maxPossible - W - 1, N, Alt.less);
        }
    case Alt.greater:
        if(W > maxPossible) { // Value impossibly large
            return 0;
        } else if(W == 0) {
            return 1;
        } else if(W * 2 >= maxPossible) {
            return wilcoxSRPExact(maxPossible - W, N, Alt.less);
        } else {
            return 1 - wilcoxSRPExact(W - 1, N, Alt.less);
        }
    case Alt.twoSided:
        // Sum of both tail probabilities, capped at 1.
        if(W * 2 <= maxPossible) {
            return min(1, wilcoxSRPExact(W, N, Alt.less) +
                wilcoxSRPExact(maxPossible - W, N, Alt.greater));
        } else {
            return min(1, wilcoxSRPExact(W, N, Alt.greater) +
                wilcoxSRPExact(maxPossible - W, N, Alt.less));
        }
    default:
        assert(0);
    }

    // cache[k * (W + 1) + w] holds the (scaled) count of k-element subsets of
    // {1, .., i} whose ranks sum to exactly w, for the current row i;
    // cachePrev holds row i - 1.  Counts are scaled by float.max * 2^-N so
    // they stay representable in float and divide out to probabilities.
    float* cache = (newStack!(float)((N + 1) * (W + 1))).ptr;
    float* cachePrev = (newStack!(float)((N + 1) * (W + 1))).ptr;
    cache[0..(N + 1) * (W + 1)] = 0;
    cachePrev[0..(N + 1) * (W + 1)] = 0;

    double comb = pow(2.0, -(cast(double) N));
    double floatMax = cast(double) float.max;
    cache[0] = cast(float) (comb * floatMax);     // Empty subset: one way, sum 0.
    cachePrev[0] = cast(float) (comb * floatMax);

    foreach(i; 1..N + 1) {
        swap(cache, cachePrev);
        foreach(k; 1..i + 1) {

            // Smallest achievable sum of k distinct ranks is 1 + 2 + .. + k.
            uint minW = k * (k + 1) / 2;
            float* curK = cache + k * (W + 1);
            float* prevK = cachePrev + k * (W + 1);
            float* prevKm1 = cachePrev + (k - 1) * (W + 1);

            foreach(w; minW..W + 1) {
                // Rank i is either excluded (prevK) or included (prevKm1 at w - i).
                curK[w] = prevK[w] + ((i <= w) ? prevKm1[w - i] : 0);
            }
        }
    }

    // Summing the scaled counts over all subset sizes and all sums <= W and
    // dividing by float.max yields the lower-tail probability P(stat <= W).
    double sum = 0;
    foreach(elem; cache[0..(N + 1) * (W + 1)]) {
        sum += cast(double) elem / (cast(double) float.max);
    }
    // Two frees, pairing the two newStack allocations above.
    TempAlloc.free;
    TempAlloc.free;
    return sum;
}

1816 | |

unittest {
    // Exercises the exact DP directly.  Values from R's wilcox.test.
    assert(approxEqual(wilcoxSRPExact(25, 10, Alt.less), 0.4229));
    assert(approxEqual(wilcoxSRPExact(25, 10, Alt.greater), 0.6152));
    assert(approxEqual(wilcoxSRPExact(25, 10, Alt.twoSided), 0.8457));
    assert(approxEqual(wilcoxSRPExact(31, 10, Alt.less), 0.6523));
    assert(approxEqual(wilcoxSRPExact(31, 10, Alt.greater), 0.3848));
    assert(approxEqual(wilcoxSRPExact(31, 10, Alt.twoSided), 0.7695));
}

1826 | |

/**Sign test for differences between paired values.  This is a very robust
 * but very low power test.  Alt.less means elements of before are typically
 * less than corresponding elements of after, Alt.greater means they are
 * typically greater, and Alt.twoSided tests for a significant difference in
 * either direction.  Tied pairs are ignored.
 *
 * Returns: A TestRes with the proportion of elements of before that were
 * greater than the corresponding element of after, and the P-value against
 * the given alternative.
 */
TestRes signTest(T, U)(T before, U after, Alt alt = Alt.twoSided)
if(doubleInput!(T) && doubleInput!(U) &&
is(typeof(before.front < after.front) == bool)) {
    ulong nGreater = 0;  // Pairs where before > after.
    ulong nLess = 0;     // Pairs where before < after.

    // Walk both ranges in lockstep, stopping at the shorter one.  Tied
    // pairs increment neither counter.
    for(; !before.empty && !after.empty; before.popFront, after.popFront) {
        if(before.front < after.front) {
            nLess++;
        } else if(after.front < before.front) {
            nGreater++;
        }
    }

    // Only untied pairs are informative.
    immutable nInformative = nGreater + nLess;
    immutable propGreater = to!double(nGreater) / nInformative;

    final switch(alt) {
    case Alt.none:
        return TestRes(propGreater);
    case Alt.less:
        // Under the null, each counter is Binomial(nInformative, 0.5).
        return TestRes(propGreater,
            binomialCDF(nGreater, nInformative, 0.5));
    case Alt.greater:
        return TestRes(propGreater,
            binomialCDF(nLess, nInformative, 0.5));
    case Alt.twoSided:
        if(nGreater == nLess) {
            return TestRes(propGreater, 1);
        }
        // Double the lower tail of the smaller count.
        immutable smaller = (nLess < nGreater) ? nLess : nGreater;
        return TestRes(propGreater,
            2 * binomialCDF(smaller, nInformative, 0.5));
    }
}

1877 | |

unittest {
    // Values from R's binom.test applied to the sign counts.
    alias approxEqual ae;
    assert(ae(signTest([1,3,4,2,5].dup, [1,2,4,8,16].dup), 1));
    assert(ae(signTest([1,3,4,2,5].dup, [1,2,4,8,16].dup, Alt.less), 0.5));
    assert(ae(signTest([1,3,4,2,5].dup, [1,2,4,8,16].dup, Alt.greater), 0.875));
    assert(ae(signTest([5,3,4,6,8].dup, [1,2,3,4,5].dup, Alt.greater), 0.03125));
    assert(ae(signTest([5,3,4,6,8].dup, [1,2,3,4,5].dup, Alt.less), 1));
    assert(ae(signTest([5,3,4,6,8].dup, [1,2,3,4,5].dup), 0.0625));

    // Fixed-mu overload.
    assert(approxEqual(signTest([1,2,6,7,9].dup, 2), 0.625));
    assert(ae(signTest([1,2,6,7,9].dup, 2).testStat, 0.75));
}

1890 | |

/**Similar to the overload, but allows testing for a difference between a
 * range and a fixed value mu.*/
TestRes signTest(T)(T data, double mu, Alt alt = Alt.twoSided)
if(doubleInput!(T) && is(typeof(data.front < mu) == bool)) {
    // repeat(mu) is infinite; the paired overload stops at data's end.
    return signTest(data, repeat(mu), alt);
}

1897 | |

/**Two-sided binomial test for whether P(success) == p. The one-sided
 * alternatives are covered by dstats.distrib.binomialCDF and binomialCDFR.
 * k is the number of successes observed, n is the number of trials, p
 * is the probability of success under the null.
 *
 * Returns: The P-value for the alternative that P(success) != p against
 * the null that P(success) == p.
 *
 * Notes: This test can also be performed using multinomialTest(), but this
 * implementation is much faster and easier to use.
 */
double binomialTest(ulong k, ulong n, double p) {
    dstatsEnforce(k <= n, "k must be <= n for binomial test.");
    dstatsEnforce(p >= 0 && p <= 1, "p must be between 0, 1 for binomial test.");

    enum epsilon = 1 - 1e-6; // Small but arbitrary constant to deal w/ rounding error.

    // The two-sided P-value is the total probability of all outcomes no more
    // likely than the observed one.  mode is the most probable outcome.
    immutable mode = cast(long) ((n + 1) * p);
    if(k == mode ||
       approxEqual(binomialPMF(k, n, p), binomialPMF(mode, n, p), 1 - epsilon)) {
        return 1;
    } else if(k > mode) {
        // Observed count is in the upper tail: take the upper tail from k,
        // then binary search the lower tail for the crossover point where
        // PMF(guess) matches PMF(k), and add that lower tail.
        immutable double upperPart = binomialCDFR(k, n, p);
        immutable pExact = binomialPMF(k, n, p);
        ulong ulim = mode, llim = 0, guess;
        while(ulim - llim > 1) {
            guess = (ulim + llim) / 2;
            immutable double pGuess = binomialPMF(guess, n, p);

            if(pGuess == pExact) {
                ulim = guess + 1;
                llim = guess;
                break;
            } else if(pGuess < pExact) {
                llim = guess;
            } else {
                ulim = guess;
            }
        }

        // Nudge the crossover point in both directions to compensate for
        // floating point rounding in binomialPMF.
        guess = ulim;
        while(binomialPMF(guess, n, p) < pExact * epsilon) {
            guess++;
        }
        while(guess > 0 && binomialPMF(guess, n, p) > pExact / epsilon) {
            guess--;
        }
        if(guess == 0 && binomialPMF(0, n, p) > pExact / epsilon) {
            // No lower-tail outcome is as unlikely as k; upper tail only.
            return upperPart;
        }
        return upperPart + binomialCDF(guess, n, p);
    } else {
        // Observed count in the lower tail: mirror image of the case above.
        static double myPMF(ulong k, ulong n, double p) {
            // Guard against the search probing k > n.
            return k > n ? 0 : binomialPMF(k, n, p);
        }

        immutable lowerPart = binomialCDF(k, n, p);
        immutable pExact = binomialPMF(k, n, p);
        ulong ulim = n + 1, llim = mode, guess;
        while(ulim - llim > 1) {
            guess = (ulim + llim) / 2;
            immutable double pGuess = myPMF(guess, n, p);
            if(pGuess == pExact) {
                ulim = guess;
                llim = guess;
                break;
            } else if(pGuess < pExact) {
                ulim = guess;
            } else {
                llim = guess;
            }
        }

        // All this stuff is necessary to deal with round-off error properly.
        guess = llim;
        while(myPMF(guess, n, p) < pExact * epsilon && guess > 0) {
            guess--;
        }
        while(myPMF(guess, n, p) > pExact / epsilon) {
            guess++;
        }

        return lowerPart + ((guess > n) ? 0 : binomialCDFR(guess, n, p));
    }
}

1983 | |

unittest {
    // Values from R's binom.test, covering both tails and edge counts.
    assert(approxEqual(binomialTest(46, 96, 0.5), 0.759649));
    assert(approxEqual(binomialTest(44, 56, 0.5), 2.088e-5));
    assert(approxEqual(binomialTest(12, 56, 0.5), 2.088e-5));
    assert(approxEqual(binomialTest(0, 40, 0.25), 2.236e-5));
    assert(approxEqual(binomialTest(5, 16, 0.5), 0.2101));
    assert(approxEqual(binomialTest(0, 20, 0.4), 4.16e-5));
    assert(approxEqual(binomialTest(20, 20, 0.6), 4.16e-5));
    assert(approxEqual(binomialTest(6, 88, 0.1), 0.3784));
    assert(approxEqual(binomialTest(3, 4, 0.5), 0.625));
    assert(approxEqual(binomialTest(4, 7, 0.8), 0.1480));
    assert(approxEqual(binomialTest(3, 9, 0.8), 0.003066));
    assert(approxEqual(binomialTest(9, 9, 0.7), 0.06565));
    assert(approxEqual(binomialTest(2, 11, 0.1), 0.3026));
    assert(approxEqual(binomialTest(1, 11, 0.1), 1));
    assert(approxEqual(binomialTest(5, 11, 0.1), 0.002751));
    assert(approxEqual(binomialTest(5, 12, 0.5), 0.7744));
    assert(approxEqual(binomialTest(12, 12, 0.5), 0.0004883));
    assert(approxEqual(binomialTest(12, 13, 0.6), 0.02042));
    assert(approxEqual(binomialTest(0, 9, 0.1), 1));
}

2006 | |

///For chiSquareFit and gTestFit, is expected value range counts or proportions?
enum Expected {
    /// Expected values are absolute counts on the same scale as observed.
    count,

    /// Expected values are relative proportions; they are normalized
    /// automatically and may sum to any positive value.
    proportion,

    // Kept for compatibility w/ old style, intentionally not documented, may
    // eventually be removed.
    COUNT = count,
    PROPORTION = proportion
}

2020 | |

/**Performs a one-way Pearson's chi-square goodness of fit test between a range
 * of observed and a range of expected values. This is a useful statistical
 * test for testing whether a set of observations fits a discrete distribution.
 *
 * Returns: A TestRes of the chi-square statistic and the P-value for the
 * alternative hypothesis that observed is not a sample from expected against
 * the null that observed is a sample from expected.
 *
 * Notes: By default, expected is assumed to be a range of expected proportions.
 * These proportions are automatically normalized, and can sum to any number.
 * By passing Expected.count in as the last parameter, calculating expected
 * counts will be skipped, and expected will be assumed to already be properly
 * normalized. This is slightly faster, but more importantly
 * allows input ranges to be used.
 *
 * The chi-square test relies on asymptotic statistical properties
 * and is therefore not considered valid, as a rule of thumb, when expected
 * counts are below 5. However, this rule is likely to be unnecessarily
 * stringent in most cases.
 *
 * This is, for all practical purposes, an inherently non-directional test.
 * Therefore, the one-sided versus two-sided option is not provided.
 *
 * Examples:
 * ---
 * // Test to see whether a set of categorical observations differs
 * // statistically from a discrete uniform distribution.
 *
 * uint[] observed = [980, 1028, 1001, 964, 1102];
 * auto expected = repeat(1.0);
 * auto res2 = chiSquareFit(observed, expected);
 * assert(approxEqual(res2, 0.0207));
 * assert(approxEqual(res2.testStat, 11.59));
 * ---
 *
 * References: http://en.wikipedia.org/wiki/Pearson%27s_chi-square_test
 */
TestRes chiSquareFit(T, U)(T observed, U expected, Expected countProp = Expected.proportion)
if(doubleInput!(T) && doubleInput!(U)) {
    // Thin wrapper: goodnessFit does all the work, parameterized on the
    // per-cell statistic (Pearson here, likelihood ratio for gTestFit).
    return goodnessFit!(pearsonChiSqElem, T, U)(observed, expected, countProp);
}

2062 | |

unittest {
    // Test to see whether a set of categorical observations differs
    // statistically from a discrete uniform distribution.
    uint[] observed = [980, 1028, 1001, 964, 1102];
    auto expected = repeat(cast(double) sum(observed) / observed.length);
    auto res = chiSquareFit(observed, expected, Expected.count);
    assert(approxEqual(res, 0.0207));
    assert(approxEqual(res.testStat, 11.59));

    // Same data via proportions; the trailing zero-expected cell must be
    // skipped without affecting the result.
    auto expected2 = [5.0, 5, 5, 5, 5, 0];
    observed ~= 0;
    auto res2 = chiSquareFit(observed, expected2);
    assert(approxEqual(res2, 0.0207));
    assert(approxEqual(res2.testStat, 11.59));
}

2078 | |

// Alias for old name, kept for backwards compatibility.  Intentionally
// undocumented because it will be deprecated eventually.
alias chiSquareFit chiSqrFit;

2082 | |

/**The G or likelihood ratio chi-square test for goodness of fit. Roughly
 * the same as Pearson's chi-square test (chiSquareFit), but may be more
 * accurate in certain situations and less accurate in others. However, it is
 * still based on asymptotic distributions, and is not exact. Usage is
 * identical to chiSquareFit.
 *
 * References: http://en.wikipedia.org/wiki/G_test
 *
 */
TestRes gTestFit(T, U)(T observed, U expected, Expected countProp = Expected.proportion)
if(doubleInput!(T) && doubleInput!(U)) {
    // Same driver as chiSquareFit, with the likelihood-ratio cell statistic.
    return goodnessFit!(gTestElem, T, U)(observed, expected, countProp);
}
// No unittest because I can't find anything to test this against.  However,
// it's hard to imagine how it could be wrong, given that goodnessFit() and
// gTestElem() both work, and, as expected, this function produces roughly
// the same results as chiSquareFit.

2100 | |

/* Shared driver for chiSquareFit and gTestFit.  elemFun computes one cell's
 * contribution to the test statistic from (observed, expected) counts.
 */
private TestRes goodnessFit(alias elemFun, T, U)(T observed, U expected, Expected countProp)
if(doubleInput!(T) && doubleInput!(U)) {
    // Proportions require a second pass over expected, hence a forward range.
    if(countProp == Expected.proportion) {
        dstatsEnforce(isForwardRange!(U),
            "Can't use expected proportions instead of counts with input ranges.");
    }

    uint len = 0;          // Number of cells actually used (df = len - 1).
    double chiSq = 0;
    double multiplier = 1; // Scales expected proportions up to counts.

    // Normalize proportions to add up to the sum of the data.
    if(countProp == Expected.proportion) {
        double expectSum = 0;
        multiplier = 0;
        auto obsCopy = observed.save;
        auto expCopy = expected.save;
        while(!obsCopy.empty && !expCopy.empty) {
            multiplier += obsCopy.front;
            expectSum += expCopy.front;
            obsCopy.popFront;
            expCopy.popFront;
        }
        multiplier /= expectSum;
    }

    while(!observed.empty && !expected.empty) {
        // scope(exit) also runs on the `continue` below, so both ranges
        // always advance in lockstep.
        scope(exit) {
            observed.popFront();
            expected.popFront();
        }
        double e = expected.front * multiplier;

        // If e is zero, then we should just treat the cell as if it didn't
        // exist.
        if(e == 0) {
            dstatsEnforce(observed.front == 0,
                "Can't have non-zero observed value w/ zero expected value.");
            continue;
        }

        chiSq += elemFun(observed.front, e);
        len++;
    }

    if(isNaN(chiSq)) {
        return TestRes(double.nan, double.nan);
    }

    return TestRes(chiSq, chiSquareCDFR(chiSq, len - 1));
}

2152 | |

/**The exact multinomial goodness of fit test for whether a set of counts
 * fits a hypothetical distribution. counts is an input range of counts.
 * proportions is an input range of expected proportions. These are normalized
 * automatically, so they can sum to any value.
 *
 * Returns: The P-value for the null that counts is a sample from proportions
 * against the alternative that it isn't.
 *
 * Notes: This test is EXTREMELY slow for anything but very small samples and
 * degrees of freedom. The Pearson's chi-square (chiSquareFit()) or likelihood
 * ratio chi-square (gTestFit()) are good enough approximations unless sample
 * size is very small.
 */
double multinomialTest(U, F)(U countsIn, F proportions)
if(isInputRange!U && isInputRange!F &&
isIntegral!(ElementType!U) && isFloatingPoint!(ElementType!(F))) {
    mixin(newFrame);  // Frees all newStack/tempdup allocations on scope exit.

    // Need random access below; copy input ranges that don't provide it.
    static if(isRandomAccessRange!U && dstats.base.hasLength!U) {
        alias countsIn counts;
    } else {
        auto counts = tempdup(countsIn);
    }

    uint N = sum(counts);

    // logPs starts out holding the raw proportions and is converted to
    // normalized log-probabilities in place below.
    double[] logPs;
    static if(std.range.hasLength!F) {
        logPs = newStack!double(proportions.length);
        size_t pIndex;
        foreach(p; proportions) {
            logPs[pIndex++] = p;
        }
    } else {
        auto app = appender(logPs);
        foreach(p; proportions) {
            app.put(p);
        }
        logPs = app.data;
    }

    logPs[] /= reduce!"a + b"(0.0, logPs);
    foreach(ref elem; logPs) {
        elem = log(elem);
    }

    // Precomputed log(i) table so the recursion below avoids repeated log calls.
    double[] logs = newStack!double(N + 1);
    logs[0] = 0;
    foreach(i; 1..logs.length) {
        logs[i] = log(i);
    }

    double nFact = logFactorial(N);
    double pVal = 0;
    uint nLeft = N;       // Counts not yet assigned by the recursion.
    double pSoFar = nFact; // Running log-probability of the partial outcome.

    // Log-probability of the actually observed table, relaxed by a tiny
    // epsilon so outcomes equal up to round-off are still included.
    double pActual = nFact;
    foreach(i, count; counts) {
        pActual += logPs[i] * count - logFactorial(count);
    }
    pActual -= pActual * 1e-6; // Epsilon to handle numerical inaccuracy.

    // Recursively enumerate every possible table with total N, summing the
    // probabilities of those no more likely than the observed table.
    // Mutates nLeft/pSoFar and restores them before returning.
    void doIt(uint pos) {
        if(pos == counts.length - 1) {
            // Last cell is forced: it takes everything that's left.
            immutable pOld = pSoFar;
            pSoFar += logPs[$ - 1] * nLeft - logFactorial(nLeft);

            if(pSoFar <= pActual) {
                pVal += exp(pSoFar);
            }
            pSoFar = pOld;
            return;
        }

        uint nLeftOld = nLeft;
        immutable pOld = pSoFar;
        double pAdd = 0;

        // Assign i of the remaining counts to cell pos; pAdd accumulates
        // log(p^i / i!) incrementally.
        foreach(i; 0..nLeft + 1) {
            if(i > 0) {
                pAdd += logPs[pos] - logs[i];
            }
            pSoFar = pOld + pAdd;
            doIt(pos + 1);
            nLeft--;
        }
        nLeft = nLeftOld;
        pSoFar = pOld;
    }
    doIt(0);
    return pVal;
}

2247 | |

unittest {
    // Nothing to test this against for more than 1 df, but it matches
    // chi-square roughly and should take the same paths for 2 vs. N degrees
    // of freedom.  For the two-category case it must agree with binomialTest.
    for(uint n = 4; n <= 100; n += 4) {
        foreach(k; 0..n + 1) {
            for(double p = 0.05; p <= 0.95; p += 0.05) {
                double bino = binomialTest(k, n, p);
                double[] ps = [p, 1 - p];
                uint[] counts = [k, n - k];
                double multino = multinomialTest(counts, ps);
                //writeln(k, "\t", n, "\t", p, "\t", bino, "\t", multino);
                assert(approxEqual(bino, multino),
                    text(bino, '\t', multino, '\t', k, '\t', n, '\t', p));
            }
        }
    }
}

2266 | |

/**Performs a Pearson's chi-square test on a contingency table of arbitrary
 * dimensions. When the chi-square test is mentioned, this is usually the one
 * being referred to. Takes a set of finite forward ranges, one for each column
 * in the contingency table. These can be expressed either as a tuple of ranges
 * or a range of ranges. Returns a P-value for the alternative hypothesis that
 * frequencies in each row of the contingency table depend on the column against
 * the null that they don't.
 *
 * Notes: The chi-square test relies on asymptotic statistical properties
 * and is therefore not exact. The typical rule of thumb is that each cell
 * should have an expected value of at least 5. However, this is likely to
 * be unnecessarily stringent.
 *
 * Yates's continuity correction is never used in this implementation. If
 * you want something that's guaranteed to be conservative, use fisherExact().
 *
 * This is, for all practical purposes, an inherently non-directional test.
 * Therefore, the one-sided versus two-sided option is not provided.
 *
 * For 2x2 contingency tables, fisherExact is a more conservative test, in that
 * the type I error rate is guaranteed to never be above the nominal P-value.
 * However, even for small sample sizes this test may produce results closer
 * to the true P-value, at the risk of possibly being non-conservative.
 *
 * Examples:
 * ---
 * // Test to see whether the relative frequency of outcome 0, 1, and 2
 * // depends on the treatment in some hypothetical experiment.
 * uint[] drug1 = [1000, 2000, 1500];
 * uint[] drug2 = [1500, 3000, 2300];
 * uint[] placebo = [500, 1100, 750];
 * assert(approxEqual(chiSquareContingency(drug1, drug2, placebo), 0.2397));
 * ---
 *
 * References: http://en.wikipedia.org/wiki/Pearson%27s_chi-square_test
 *
 */
TestRes chiSquareContingency(T...)(T inputData) {
    // Thin wrapper: testContingency does the work, parameterized on the
    // Pearson per-cell statistic.
    return testContingency!(pearsonChiSqElem, T)(inputData);
}

2307 | |

unittest {
    // Test array version.  Using VassarStats' chi-square calculator.  The
    // two tables are transposes, and must give identical results.
    uint[][] table1 = [[60, 80, 70],
                       [20, 50, 40],
                       [10, 15, 11]];
    uint[][] table2 = [[60, 20, 10],
                       [80, 50, 15],
                       [70, 40, 11]];
    assert(approxEqual(chiSquareContingency(table1), 0.3449));
    assert(approxEqual(chiSquareContingency(table2), 0.3449));
    assert(approxEqual(chiSquareContingency(table1).testStat, 4.48));

    // Test tuple version.
    auto p1 = chiSquareContingency(cast(uint[]) [31, 41, 59],
                                   cast(uint[]) [26, 53, 58],
                                   cast(uint[]) [97, 93, 93]);
    assert(approxEqual(p1, 0.0059));

    auto p2 = chiSquareContingency(cast(uint[]) [31, 26, 97],
                                   cast(uint[]) [41, 53, 93],
                                   cast(uint[]) [59, 58, 93]);
    assert(approxEqual(p2, 0.0059));

    uint[] drug1 = [1000, 2000, 1500];
    uint[] drug2 = [1500, 3000, 2300];
    uint[] placebo = [500, 1100, 750];
    assert(approxEqual(chiSquareContingency(drug1, drug2, placebo), 0.2397));
}

2336 | |

// Alias for old name, kept for backwards compatibility.  Intentionally
// undocumented because it is deprecated and has been scheduled for removal
// for ages.
deprecated alias chiSquareContingency chiSqrContingency;

2341 | |

/**
This struct is a subtype of TestRes (via alias this) and is used to return
the results of gTestContingency.  Due to the information theoretic
interpretation of the G test, it contains an extra field to return the
mutual information in bits.
*/
struct GTestRes {
    /// The test statistic and P-value, as for the other contingency tests.
    TestRes testRes;

    /// Makes GTestRes implicitly usable wherever a TestRes is expected.
    alias testRes this;

    /**
    The mutual info of the two random variables in the joint distribution
    represented by the contingency table, in bits (base 2).
    */
    double mutualInfo;
}

2361 | |

/**
The G or likelihood ratio chi-square test for contingency tables.  Roughly
the same as Pearson's chi-square test (chiSquareContingency), but may be more
accurate in certain situations and less accurate in others.

Like Pearson's Chi-square, the G-test is based on asymptotic distributions,
and is not exact.  Usage is identical to chiSquareContingency.

Note: This test can be thought of as a test for nonzero mutual information
between the random variables represented by the rows and the columns,
since the test statistic and P-value are strictly increasing
and strictly decreasing, respectively, in mutual information.  Therefore, this
function returns a GTestRes, which is a subtype of TestRes and also gives
the mutual information for use in information theoretic settings.

References: http://en.wikipedia.org/wiki/G_test, last retrieved 1/22/2011

*/
GTestRes gTestContingency(T...)(T inputData) {
    // Shares its implementation with the Pearson test; only the per-cell
    // term (gTestElem) differs.
    return testContingency!(gTestElem, T)(inputData);
}

2383 | |

unittest {
    // Values from example at http://udel.edu/~mcdonald/statgtestind.html
    // Handbook of Biological Statistics.
    uint[] withoutCHD = [268, 199, 42];
    uint[] withCHD = [807, 759, 184];
    auto res = gTestContingency(withoutCHD, withCHD);
    assert(approxEqual(res.testStat, 7.3));
    assert(approxEqual(res.p, 0.026));
    assert(approxEqual(res.mutualInfo, 0.0023313));


    // Second example from the same source.
    uint[] moringa = [127, 99, 264];
    uint[] vicinus = [116, 67, 161];
    auto res2 = gTestContingency(moringa, vicinus);
    assert(approxEqual(res2.testStat, 6.23));
    assert(approxEqual(res2.p, 0.044));
    assert(approxEqual(res2.mutualInfo, 0.00538613));
}

2402 | |

// Natural log of 2, for converting between base e and base 2 logarithms
// (nats to bits).
private enum loge2 = 0.69314718055994530941723212145817656807550013436025525412;

2405 | |

// Pearson and likelihood ratio code are pretty much the same.  Factor out
// the one difference into a function that's a template parameter.  However,
// for API simplicity, this is hidden and they look like two separate functions.
//
// Returns a GTestRes; the mutualInfo field is only meaningful when elemFun
// is gTestElem (see comment near the bottom).
private GTestRes testContingency(alias elemFun, T...)(T rangesIn) {
    mixin(newFrame);

    // Accept either a single forward range of forward ranges, or a tuple of
    // forward ranges.  Either way, each element of `ranges` is one line of
    // the contingency table.
    static if(isForwardRange!(T[0]) && T.length == 1 &&
        isForwardRange!(typeof(rangesIn[0].front()))) {
        auto ranges = tempdup(rangesIn[0]);
    } else static if(allSatisfy!(isForwardRange, typeof(rangesIn))) {
        alias rangesIn ranges;
    } else {
        static assert(0, "Can only perform contingency table test" ~
            " on a tuple of ranges or a range of ranges.");
    }

    // colSums[i] is the grand total of ranges[i].  Every range must have the
    // same number of elements (asserted below).
    double[] colSums = newStack!(double)(ranges.length);
    colSums[] = 0;
    size_t nCols = 0;
    immutable size_t nRows = ranges.length;
    foreach(ri, range; ranges) {
        size_t curLen = 0;
        foreach(elem; range.save) {
            colSums[ri] += cast(double) elem;
            curLen++;
        }
        if(ri == 0) {
            nCols = curLen;
        } else {
            assert(curLen == nCols);
        }
    }

    // True iff every range still has at least one element left.
    bool noneEmpty() {
        foreach(range; ranges) {
            if(range.empty) {
                return false;
            }
        }
        return true;
    }

    // Advance all ranges to the next position in lock step.
    void popAll() {
        foreach(i, range; ranges) {
            ranges[i].popFront;
        }
    }

    // Sum of the current front element across all ranges.
    double sumRow() {
        double rowSum = 0;
        foreach(range; ranges) {
            rowSum += cast(double) range.front;
        }
        return rowSum;
    }

    double chiSq = 0;
    immutable double NNeg1 = 1.0 / sum(colSums);  // 1 / grand total.
    while(noneEmpty) {
        auto rowSum = sumRow();
        foreach(ri, range; ranges) {
            // Expected count under independence: rowTotal * colTotal / N.
            double expected = NNeg1 * rowSum * colSums[ri];
            chiSq += elemFun(range.front, expected);
        }
        popAll();
    }

    if(isNaN(chiSq)) {
        return GTestRes(TestRes(double.nan, double.nan), double.nan);
    }

    // A slightly negative statistic can happen in some cases due to numerical
    // fuzz; clamp it to zero.  BUG FIX:  the old condition
    // (chiSq > 1e-5 && chiSq <= 0) could never be true, so the clamp never
    // fired and tiny negative values leaked through to chiSquareCDFR.
    if(chiSq <= 0 && chiSq > -1e-5) {
        return GTestRes(TestRes(0, 1), 0);
    }

    immutable pVal = chiSquareCDFR(chiSq, (nRows - 1) * (nCols - 1));

    // 1 / (2 * loge2), for converting chiSq to mutualInfo.
    enum chiToMi = 1 / (2 * loge2);

    // This is the mutual information between the two random variables
    // represented by the contingency table, only if we're doing a G test.
    // If we're doing a Pearson's test, it's a completely meaningless quantity,
    // but never gets returned by any public function.
    immutable mutualInfo = chiSq * NNeg1 * chiToMi;

    return GTestRes(TestRes(chiSq, pVal), mutualInfo);
}

2494 | |

// One cell's contribution to Pearson's chi-square statistic:
// (observed - expected)^2 / expected.
private double pearsonChiSqElem(double observed, double expected) pure nothrow {
    immutable deviation = observed - expected;
    return deviation * deviation / expected;
}

2499 | |

// One cell's contribution to the G statistic:  2 * O * ln(O / E).
// An observed count of zero contributes nothing (lim x->0 of x ln x = 0),
// provided the expected count is positive.
private double gTestElem(double observed, double expected) pure nothrow {
    if(observed == 0 && expected > 0) {
        return 0;
    }
    return 2 * observed * log(observed / expected);
}

2504 | |

/**
Given two vectors of observations of jointly distributed variables x, y, tests
the null hypothesis that values in x are independent of the corresponding
values in y.  This is done using Pearson's Chi-Square Test.  For a similar test
that assumes the data has already been tabulated into a contingency table, see
chiSquareContingency().

x and y must both be input ranges.  If they are not the same length, an
exception is thrown.

Examples:
---
// Test whether the appearance of "foo" vs. "bar" is independent of the
// appearance of "baz" vs. "xxx".
auto x = ["foo", "bar", "bar", "foo", "foo"];
auto y = ["xxx", "baz", "baz", "xxx", "baz"];
auto result = chiSquareObs(x, y);
---
*/
TestRes chiSquareObs(T, U)(T x, U y)
if(isInputRange!T && isInputRange!U) {
    uint xFreedom, yFreedom, n;
    TestRes result;

    // Pick the narrowest count type that can hold the per-bin counts,
    // based on the input length when it's knowable up front; otherwise
    // fall back to uint.
    static if(!dstats.base.hasLength!T && !dstats.base.hasLength!U) {
        result.testStat = toContingencyScore!(T, U, uint)
            (x, y, &pearsonChiSqElem, xFreedom, yFreedom, n);
    } else {
        immutable shorterLen = min(x.length, y.length);
        if(shorterLen <= ubyte.max) {
            result.testStat = toContingencyScore!(T, U, ubyte)
                (x, y, &pearsonChiSqElem, xFreedom, yFreedom, n);
        } else if(shorterLen <= ushort.max) {
            result.testStat = toContingencyScore!(T, U, ushort)
                (x, y, &pearsonChiSqElem, xFreedom, yFreedom, n);
        } else {
            result.testStat = toContingencyScore!(T, U, uint)
                (x, y, &pearsonChiSqElem, xFreedom, yFreedom, n);
        }
    }

    result.p = chiSquareCDFR(result.testStat, xFreedom * yFreedom);
    return result;
}

2549 | |

unittest {
    // We know the chi-square contingency works, so test that the automatic
    // binning works, too:  the paired observations below tabulate exactly
    // into cTable.
    ubyte[] obs1 = [1, 2, 3, 1, 2, 3, 1, 2, 3];
    ubyte[] obs2 = [1, 3, 2, 1, 3, 2, 1, 3, 2];

    uint[][] cTable = [[3, 0, 0],
                       [0, 0, 3],
                       [0, 3, 0]];
    auto gRes = chiSquareContingency(cTable);
    auto miRes = chiSquareObs(obs1, obs2);
    // Compare every field of the two TestRes results.
    foreach(ti, elem; miRes.tupleof) {
        assert(approxEqual(elem, gRes.tupleof[ti]));
    }

    // The doc example, pinned against known values.
    auto x = ["foo", "bar", "bar", "foo", "foo"];
    auto y = ["xxx", "baz", "baz", "xxx", "baz"];
    auto result = chiSquareObs(x, y);
    assert(approxEqual(result.testStat, 2.22222222));
    assert(approxEqual(result.p, 0.136037));
}

2571 | |

/**
Given two vectors of observations of jointly distributed variables x, y, tests
the null hypothesis that values in x are independent of the corresponding
values in y.  This is done using the Likelihood Ratio G test.  Usage is similar
to chiSquareObs.  For an otherwise identical test that assumes the data has
already been tabulated into a contingency table, see gTestContingency().

Note: This test can be thought of as a test for nonzero mutual information
between x and y, since the test statistic and P-value are strictly increasing
and strictly decreasing, respectively, in mutual information.  Therefore, this
function returns a GTestRes, which is a subtype of TestRes and also gives
the mutual information for use in information theoretic settings.
*/
GTestRes gTestObs(T, U)(T x, U y)
if(isInputRange!T && isInputRange!U) {
    uint xFreedom, yFreedom, n;
    GTestRes result;

    // Pick the narrowest count type that can hold the per-bin counts,
    // based on the input length when it's knowable up front; otherwise
    // fall back to uint.
    static if(!dstats.base.hasLength!T && !dstats.base.hasLength!U) {
        result.testStat = toContingencyScore!(T, U, uint)
            (x, y, &gTestElem, xFreedom, yFreedom, n);
    } else {
        immutable shorterLen = min(x.length, y.length);
        if(shorterLen <= ubyte.max) {
            result.testStat = toContingencyScore!(T, U, ubyte)
                (x, y, &gTestElem, xFreedom, yFreedom, n);
        } else if(shorterLen <= ushort.max) {
            result.testStat = toContingencyScore!(T, U, ushort)
                (x, y, &gTestElem, xFreedom, yFreedom, n);
        } else {
            result.testStat = toContingencyScore!(T, U, uint)
                (x, y, &gTestElem, xFreedom, yFreedom, n);
        }
    }

    result.p = chiSquareCDFR(result.testStat, xFreedom * yFreedom);
    // G = 2 * N * ln(2) * I(x; y), so invert to get mutual info in bits.
    result.mutualInfo = result.testStat / (2 * loge2 * n);
    return result;
}

2611 | |

unittest {
    // We know the g test contingency works, so test that the automatic binning
    // works, too:  the paired observations below tabulate exactly into cTable.
    ubyte[] obs1 = [1, 2, 3, 1, 2, 3, 1, 2, 3];
    ubyte[] obs2 = [1, 3, 2, 1, 3, 2, 1, 3, 2];

    uint[][] cTable = [[3, 0, 0],
                       [0, 0, 3],
                       [0, 3, 0]];
    auto gRes = gTestContingency(cTable);
    auto miRes = gTestObs(obs1, obs2);
    // Compare every field of the two GTestRes results.
    foreach(ti, elem; miRes.tupleof) {
        assert(approxEqual(elem, gRes.tupleof[ti]));
    }

    // Same doc example as chiSquareObs, pinned against known values.
    auto x = ["foo", "bar", "bar", "foo", "foo"];
    auto y = ["xxx", "baz", "baz", "xxx", "baz"];
    auto result = gTestObs(x, y);
    assert(approxEqual(result.testStat, 2.91103));
    assert(approxEqual(result.p, 0.0879755));
    assert(approxEqual(result.mutualInfo, 0.419973));
}

2634 | |

// Tabulates paired observations (x[i], y[i]) into joint and marginal counts,
// then accumulates elemFun(observed, expected) over every cell of the implied
// contingency table -- including cells whose joint count is zero.  Uint is
// the integer type used for the counts.
//
// Out params:  xFreedom and yFreedom are (number of distinct values - 1) for
// each variable (degrees of freedom per dimension); nPtr is the number of
// paired observations consumed.  Throws via dstatsEnforce if x and y have
// different lengths.
package double toContingencyScore(T, U, Uint)
(T x, U y, double function(double, double) elemFun,
out uint xFreedom, out uint yFreedom, out uint nPtr) {

    enum needsHeap = dstats.infotheory.NeedsHeap!T ||
        dstats.infotheory.NeedsHeap!U;
    alias dstats.infotheory.ObsEnt!(ElementType!T, ElementType!U) ObsType;

    // Count tables live on the TempAlloc stack when the element types allow
    // it; otherwise fall back to GC-backed associative arrays.
    static if(needsHeap) {
        Uint[ObsType] jointCounts;
        Uint[ElementType!T] xCounts;
        Uint[ElementType!U] yCounts;
    } else {
        mixin(newFrame);
        dstatsEnforce(x.length == y.length,
            "Can't calculate mutual info with different length vectors.");
        immutable len = x.length;
        auto jointCounts = StackHash!(ObsType, Uint)(max(20, len / 20));
        auto xCounts = StackHash!(ElementType!T, Uint)(max(10, len / 40));
        auto yCounts = StackHash!(ElementType!U, Uint)(max(10, len / 40));
    }

    // Single pass:  count joint occurrences and both marginals.
    uint n = 0;
    while(!x.empty && !y.empty) {
        n++;
        auto a = x.front;
        auto b = y.front;
        jointCounts[ObsType(a, b)]++;
        xCounts[a]++;
        yCounts[b]++;

        x.popFront();
        y.popFront();
    }

    // Length mismatch can only be detected here for ranges without .length.
    dstatsEnforce(x.empty && y.empty,
        "Can't calculate mutual info with different length vectors.");

    xFreedom = cast(uint) xCounts.length - 1;
    yFreedom = cast(uint) yCounts.length - 1;
    nPtr = n;

    // Iterate the full cross product of observed x and y values; absent
    // joint cells count as observed == 0.  Marginals are always positive
    // here, so expected > 0 for every visited cell.
    double ret = 0;
    immutable double nNeg1 = 1.0 / n;
    foreach(key1, marg1; xCounts) foreach(key2, marg2; yCounts) {
        immutable observed = jointCounts.get(
            ObsType(key1, key2), 0
        );
        immutable expected = marg1 * nNeg1 * marg2;
        ret += elemFun(observed, expected);
    }

    return ret;
}

2689 | |

/**Fisher's Exact test for difference in odds between rows/columns
 * in a 2x2 contingency table.  Specifically, this function tests the odds
 * ratio, which is defined, for a contingency table c, as (c[0][0] * c[1][1])
 * / (c[1][0] * c[0][1]).  Alternatives are Alt.less, meaning true odds ratio
 * < 1, Alt.greater, meaning true odds ratio > 1, and Alt.twoSided, meaning
 * true odds ratio != 1.
 *
 * Accepts a 2x2 contingency table as an array of arrays of uints.
 * For now, only does 2x2 contingency tables.
 *
 * Notes:  Although this test is "exact" in that it does not rely on asymptotic
 * approximations, it is very statistically conservative when the marginals
 * are not truly fixed in the experimental design in question.  If a
 * closer but possibly non-conservative approximation of the true P-value is
 * desired, Pearson's chi-square test (chiSquareContingency) may perform better,
 * even for small samples.
 *
 * Returns:  A TestRes of the odds ratio and the P-value against the given
 * alternative.
 *
 * Examples:
 * ---
 * double res = fisherExact([[2u, 7], [8, 2]], Alt.less);
 * assert(approxEqual(res.p, 0.01852));  // Odds ratio is very small in this case.
 * assert(approxEqual(res.testStat, 4.0 / 56.0));
 * ---
 *
 * References:  http://en.wikipedia.org/wiki/Fisher%27s_Exact_Test
 *
 */
TestRes fisherExact(T)(const T[2][2] contingencyTable, Alt alt = Alt.twoSided)
if(isIntegral!(T)) {
    // Guards against signed integral T with negative cells.
    foreach(range; contingencyTable) {
        foreach(elem; range) {
            dstatsEnforce(elem >= 0,
                "Cannot have negative elements in a contingency table.");
        }
    }

    // One-sided P-values are plain hypergeometric tail probabilities with
    // the marginals fixed.
    static double fisherLower(const T[2][2] contingencyTable) {
        alias contingencyTable c;
        return hypergeometricCDF(c[0][0], c[0][0] + c[0][1], c[1][0] + c[1][1],
            c[0][0] + c[1][0]);
    }

    static double fisherUpper(const T[2][2] contingencyTable) {
        alias contingencyTable c;
        return hypergeometricCDFR(c[0][0], c[0][0] + c[0][1], c[1][0] + c[1][1],
            c[0][0] + c[1][0]);
    }


    alias contingencyTable c; // Save typing.
    // NOTE(review):  a zero in c[0][1] or c[1][0] makes this infinity/NaN;
    // the P-value computations below do not depend on it.
    immutable oddsRatio = cast(double) c[0][0] * c[1][1] / c[0][1] / c[1][0];
    if(alt == Alt.none) {
        return TestRes(oddsRatio);
    } else if(alt == Alt.less) {
        return TestRes(oddsRatio, fisherLower(contingencyTable));
    } else if(alt == Alt.greater) {
        return TestRes(oddsRatio, fisherUpper(contingencyTable));
    }


    // Two-sided case:  sum the probabilities of all tables (with the same
    // marginals) at most as probable as the observed one.  n1/n2 are the
    // row totals, n is the first column total.
    immutable uint n1 = c[0][0] + c[0][1],
                   n2 = c[1][0] + c[1][1],
                   n = c[0][0] + c[1][0];

    // Mode of the hypergeometric distribution:  the most probable table.
    immutable uint mode =
        cast(uint) ((cast(double) (n + 1) * (n1 + 1)) / (n1 + n2 + 2));
    immutable double pExact = hypergeometricPMF(c[0][0], n1, n2, n);
    immutable double pMode = hypergeometricPMF(mode, n1, n2, n);

    // epsilon absorbs floating point fuzz in PMF comparisons.
    enum epsilon = 1 - 1e-5;
    if(approxEqual(pExact, pMode, 1 - epsilon)) {
        // Observed table is (essentially) the mode:  everything counts.
        return TestRes(oddsRatio, 1);
    } else if(c[0][0] < mode) {
        // Observed count is in the lower tail; find where the upper tail of
        // equally-or-less-probable tables begins.
        immutable double pLower = hypergeometricCDF(c[0][0], n1, n2, n);

        // If even the extreme upper table is more probable than the observed
        // one, the upper tail contributes nothing.
        if(hypergeometricPMF(n, n1, n2, n) > pExact / epsilon) {
            return TestRes(oddsRatio, pLower);
        }

        // Binary search for where to begin upper half.
        uint min = mode, max = n, guess = uint.max;
        while(max - min > 1) {
            guess = cast(uint) (
                (max == min + 1 && guess == min) ? max :
                (cast(ulong) max + cast(ulong) min) / 2UL);

            immutable double pGuess = hypergeometricPMF(guess, n1, n2, n);
            if(pGuess <= pExact &&
                hypergeometricPMF(guess - 1, n1, n2, n) > pExact) {
                break;
            } else if(pGuess < pExact) {
                max = guess;
            } else min = guess;
        }

        if(guess == uint.max) {
            guess = min;
        }

        // Linear fix-up of the boundary to within the fuzz tolerance.
        while(guess > 0 && hypergeometricPMF(guess, n1, n2, n) < pExact * epsilon) {
            guess--;
        }

        while(hypergeometricPMF(guess, n1, n2, n) > pExact / epsilon) {
            guess++;
        }

        double p = std.algorithm.min(pLower +
            hypergeometricCDFR(guess, n1, n2, n), 1.0);
        return TestRes(oddsRatio, p);
    } else {
        // Mirror image:  observed count is in the upper tail; find where the
        // lower tail of equally-or-less-probable tables ends.
        immutable double pUpper = hypergeometricCDFR(c[0][0], n1, n2, n);

        if(hypergeometricPMF(0, n1, n2, n) > pExact / epsilon) {
            return TestRes(oddsRatio, pUpper);
        }

        // Binary search for where to begin lower half.
        uint min = 0, max = mode, guess = uint.max;
        while(max - min > 1) {
            guess = cast(uint) (
                (max == min + 1 && guess == min) ? max :
                (cast(ulong) max + cast(ulong) min) / 2UL);
            immutable double pGuess = hypergeometricPMF(guess, n1, n2, n);

            if(pGuess <= pExact &&
                hypergeometricPMF(guess + 1, n1, n2, n) > pExact) {
                break;
            } else if(pGuess <= pExact) {
                min = guess;
            } else max = guess;
        }

        if(guess == uint.max) {
            guess = min;
        }

        // Linear fix-up of the boundary to within the fuzz tolerance.
        while(hypergeometricPMF(guess, n1, n2, n) < pExact * epsilon) {
            guess++;
        }

        while(guess > 0 &&
            hypergeometricPMF(guess, n1, n2, n) > pExact / epsilon) {
            guess--;
        }

        double p = std.algorithm.min(pUpper +
            hypergeometricCDF(guess, n1, n2, n), 1.0);
        return TestRes(oddsRatio, p);
    }
}

2844 | |

/**Convenience function.  Converts a dynamic array to a static one, then
 * calls the overload.*/
TestRes fisherExact(T)(const T[][] contingencyTable, Alt alt = Alt.twoSided)
if(isIntegral!(T)) {
    dstatsEnforce(contingencyTable.length == 2 &&
        contingencyTable[0].length == 2 &&
        contingencyTable[1].length == 2,
        "Fisher exact only supports 2x2 tables.");

    // Copy the four cells into a stack-allocated static table.
    T[2][2] staticTable;
    foreach(row; 0..2) {
        foreach(col; 0..2) {
            staticTable[row][col] = contingencyTable[row][col];
        }
    }
    return fisherExact(staticTable, alt);
}

2861 | |

unittest {
    // Simple, naive impl. of two-sided to test against:  sums the PMF over
    // all tables no more probable than the observed one (modulo fuzz).
    static double naive(const uint[][] c) {
        immutable uint n1 = c[0][0] + c[0][1],
                       n2 = c[1][0] + c[1][1],
                       n = c[0][0] + c[1][0];
        immutable uint mode =
            cast(uint) ((cast(double) (n + 1) * (n1 + 1)) / (n1 + n2 + 2));
        immutable double pExact = hypergeometricPMF(c[0][0], n1, n2, n);
        immutable double pMode = hypergeometricPMF(mode, n1, n2, n);
        if(approxEqual(pExact, pMode, 1e-7))
            return 1;
        double sum = 0;
        foreach(i; 0..n + 1) {
            double pCur = hypergeometricPMF(i, n1, n2, n);
            if(pCur <= pExact / (1 - 1e-5))
                sum += pCur;
        }
        return sum;
    }

    // Randomized cross-check of the fast two-sided implementation against
    // the O(n) naive one.  Note:  100_000 iterations makes this the slowest
    // test in the module.
    uint[][] c = new uint[][](2, 2);

    foreach(i; 0..100_000) {
        c[0][0] = uniform(0U, 51U);
        c[0][1] = uniform(0U, 51U);
        c[1][0] = uniform(0U, 51U);
        c[1][1] = uniform(0U, 51U);
        double naiveAns = naive(c);
        double fastAns = fisherExact(c);
        assert(approxEqual(naiveAns, fastAns), text(c, naiveAns, fastAns));
    }

    // Fixed cases:  two-sided, then Alt.less, then Alt.greater.
    auto res = fisherExact([[19000, 80000], [20000, 90000]]);
    assert(approxEqual(res.testStat, 1.068731));
    assert(approxEqual(res, 3.319e-9));
    res = fisherExact([[18000, 80000], [20000, 90000]]);
    assert(approxEqual(res, 0.2751));
    res = fisherExact([[14500, 20000], [30000, 40000]]);
    assert(approxEqual(res, 0.01106));
    res = fisherExact([[100, 2], [1000, 5]]);
    assert(approxEqual(res, 0.1301));
    res = fisherExact([[2, 7], [8, 2]]);
    assert(approxEqual(res, 0.0230141));
    res = fisherExact([[5, 1], [10, 10]]);
    assert(approxEqual(res, 0.1973244));
    res = fisherExact([[5, 15], [20, 20]]);
    assert(approxEqual(res, 0.0958044));
    res = fisherExact([[5, 16], [20, 25]]);
    assert(approxEqual(res, 0.1725862));
    res = fisherExact([[10, 5], [10, 1]]);
    assert(approxEqual(res, 0.1973244));
    res = fisherExact([[5, 0], [1, 4]]);
    assert(approxEqual(res.p, 0.04761904));
    res = fisherExact([[0, 1], [3, 2]]);
    assert(approxEqual(res.p, 1.0));
    res = fisherExact([[0, 2], [6, 4]]);
    assert(approxEqual(res.p, 0.4545454545));
    res = fisherExact([[2, 7], [8, 2]], Alt.less);
    assert(approxEqual(res, 0.01852));
    res = fisherExact([[5, 1], [10, 10]], Alt.less);
    assert(approxEqual(res, 0.9783));
    res = fisherExact([[5, 15], [20, 20]], Alt.less);
    assert(approxEqual(res, 0.05626));
    res = fisherExact([[5, 16], [20, 25]], Alt.less);
    assert(approxEqual(res, 0.08914));
    res = fisherExact([[2, 7], [8, 2]], Alt.greater);
    assert(approxEqual(res, 0.999));
    res = fisherExact([[5, 1], [10, 10]], Alt.greater);
    assert(approxEqual(res, 0.1652));
    res = fisherExact([[5, 15], [20, 20]], Alt.greater);
    assert(approxEqual(res, 0.985));
    res = fisherExact([[5, 16], [20, 25]], Alt.greater);
    assert(approxEqual(res, 0.9723));
}

2937 | |

/**Performs a Kolmogorov-Smirnov (K-S) 2-sample test.  The K-S test is a
 * non-parametric test for a difference between two empirical distributions or
 * between an empirical distribution and a reference distribution.
 *
 * Returns:  A TestRes with the K-S D value and a P value for the null that
 * FPrime is distributed identically to F against the alternative that it isn't.
 * This implementation uses a signed D value to indicate the direction of the
 * difference between distributions.  To get the D value used in standard
 * notation, simply take the absolute value of this D value.
 *
 * Bugs:  Exact calculation not implemented.  Uses asymptotic approximation.
 *
 * References:  http://en.wikipedia.org/wiki/Kolmogorov%E2%80%93Smirnov_test
 */
TestRes ksTest(T, U)(T F, U Fprime)
if(doubleInput!(T) && doubleInput!(U)) {
    immutable dStat = ksTestD(F, Fprime);
    return TestRes(dStat, ksPval(F.length, Fprime.length, dStat));
}

2957 | |

unittest {
    // D statistics (signed), including tie handling, for both the
    // allocating and destructive versions.
    assert(approxEqual(ksTest([1,2,3,4,5].dup, [1,2,3,4,5].dup).testStat, 0));
    assert(approxEqual(ksTestDestructive([1,2,3,4,5].dup, [1,2,2,3,5].dup).testStat, -.2));
    assert(approxEqual(ksTest([-1,0,2,8, 6].dup, [1,2,2,3,5].dup).testStat, .4));
    assert(approxEqual(ksTest([1,2,3,4,5].dup, [1,2,2,3,5,7,8].dup).testStat, .2857));
    assert(approxEqual(ksTestDestructive([1, 2, 3, 4, 4, 4, 5].dup,
           [1, 2, 3, 4, 5, 5, 5].dup).testStat, .2857));

    // P-values (implicit conversion of TestRes to its P-value).
    assert(approxEqual(ksTest([1, 2, 3, 4, 4, 4, 5].dup, [1, 2, 3, 4, 5, 5, 5].dup),
           .9375));
    assert(approxEqual(ksTestDestructive([1, 2, 3, 4, 4, 4, 5].dup,
           [1, 2, 3, 4, 5, 5, 5].dup), .9375));
}

2971 | |

// True iff T is a random-access range with length whose elements can be
// swapped and assigned -- i.e. it can be sorted in place like an array.
template isArrayLike(T) {
    enum bool isArrayLike = hasSwappableElements!(T) && hasAssignableElements!(T)
        && dstats.base.hasLength!(T) && isRandomAccessRange!(T);
}

2976 | |

/**One-sample KS test against a reference distribution, doesn't modify input
 * data.  Takes a function pointer or delegate for the CDF of refernce
 * distribution.
 *
 * Returns:  A TestRes with the K-S D value and a P value for the null that
 * Femp is a sample from F against the alternative that it isn't.  This
 * implementation uses a signed D value to indicate the direction of the
 * difference between distributions.  To get the D value used in standard
 * notation, simply take the absolute value of this D value.
 *
 * Bugs:  Exact calculation not implemented.  Uses asymptotic approximation.
 *
 * Examples:
 * ---
 * auto stdNormal = parametrize!(normalCDF)(0.0, 1.0);
 * auto empirical = [1, 2, 3, 4, 5];
 * double res = ksTest(empirical, stdNormal);
 * ---
 *
 * References:  http://en.wikipedia.org/wiki/Kolmogorov%E2%80%93Smirnov_test
 *
 */
TestRes ksTest(T, Func)(T Femp, Func F)
if(doubleInput!(T) && is(ReturnType!(Func) : double)) {
    immutable dStat = ksTestD(Femp, F);
    return TestRes(dStat, ksPval(Femp.length, dStat));
}

3004 | |

unittest {
    // One-sample D statistics against a standard normal reference.
    auto stdNormal = paramFunctor!(normalCDF)(0.0, 1.0);
    assert(approxEqual(ksTest([1,2,3,4,5].dup, stdNormal).testStat, -.8413));
    assert(approxEqual(ksTestDestructive([-1,0,2,8, 6].dup, stdNormal).testStat, -.5772));
    auto lotsOfTies = [5,1,2,2,2,2,2,2,3,4].dup;
    assert(approxEqual(ksTest(lotsOfTies, stdNormal).testStat, -0.8772));

    // P-value (implicit conversion of TestRes).
    assert(approxEqual(ksTest([0,1,2,3,4].dup, stdNormal), .03271));

    // And against a uniform reference.
    auto uniform01 = parametrize!(uniformCDF)(0, 1);
    assert(approxEqual(ksTestDestructive([0.1, 0.3, 0.5, 0.9, 1].dup, uniform01), 0.7591));

}

3018 | |

/**Same as ksTest, except sorts in place, avoiding memory allocations.*/
TestRes ksTestDestructive(T, U)(T F, U Fprime)
if(isArrayLike!(T) && isArrayLike!(U)) {
    immutable dStat = ksTestDDestructive(F, Fprime);
    return TestRes(dStat, ksPval(F.length, Fprime.length, dStat));
}

3025 | |

///Ditto.
TestRes ksTestDestructive(T, Func)(T Femp, Func F)
if(isArrayLike!(T) && is(ReturnType!Func : double)) {
    immutable dStat = ksTestDDestructive(Femp, F);
    return TestRes(dStat, ksPval(Femp.length, dStat));
}

3032 | |

// Non-destructive two-sample D:  copies both inputs onto the TempAlloc
// stack and runs the in-place version on the copies.
private double ksTestD(T, U)(T F, U Fprime)
if(isInputRange!(T) && isInputRange!(U)) {
    auto TAState = TempAlloc.getState;
    scope(exit) {
        // Two frees:  one for each tempdup below.
        TempAlloc.free(TAState);
        TempAlloc.free(TAState);
    }
    return ksTestDDestructive(tempdup(F), tempdup(Fprime));
}

3042 | |

// Computes the signed two-sample K-S D statistic, sorting both inputs
// in place.  D is the difference (empirical CDF of F minus empirical CDF
// of Fprime) with the largest absolute value.
private double ksTestDDestructive(T, U)(T F, U Fprime)
if(isArrayLike!(T) && isArrayLike!(U)) {
    qsort(F);
    qsort(Fprime);
    double D = 0;
    size_t FprimePos = 0;
    foreach(i; 0..2) { //Test both w/ Fprime x vals, F x vals.
        // Second pass swaps the roles of F and Fprime, so negate the sign
        // to keep D's direction consistent.
        double diffMult = (i == 0) ? 1 : -1;
        foreach(FPos, Xi; F) {
            if(FPos < F.length - 1 && F[FPos + 1] == Xi)
                continue; //Handle ties:  only evaluate at the last duplicate.
            // Advance Fprime's CDF position to just past Xi.
            while(FprimePos < Fprime.length && Fprime[FprimePos] <= Xi) {
                FprimePos++;
            }
            double diff = diffMult * (cast(double) (FPos + 1) / F.length -
                cast(double) FprimePos / Fprime.length);
            if(abs(diff) > abs(D))
                D = diff;
        }
        swap(F, Fprime);
        FprimePos = 0;
    }
    return D;
}

3067 | |

// Non-destructive one-sample D:  copies Femp onto the TempAlloc stack and
// runs the in-place version on the copy.
private double ksTestD(T, Func)(T Femp, Func F)
if(doubleInput!(T) && is(ReturnType!Func : double)) {
    scope(exit) TempAlloc.free;  // Frees the tempdup below.
    return ksTestDDestructive(tempdup(Femp), F);
}

3073 | |

// Computes the signed one-sample K-S D statistic against a reference CDF F,
// sorting Femp in place.
//
// NOTE(review):  this compares the empirical CDF just *below* each sample
// point (FPos / N) against F(Xi); the textbook statistic also considers the
// step just above ((FPos + 1) / N).  The module's unittests pin the current
// behavior, so confirm before changing.
private double ksTestDDestructive(T, Func)(T Femp, Func F)
if(isArrayLike!(T) && is(ReturnType!Func : double)) {
    qsort(Femp);
    double D = 0;

    foreach(FPos, Xi; Femp) {
        double diff = cast(double) FPos / Femp.length - F(Xi);
        if(abs(diff) > abs(D))
            D = diff;
    }

    return D;
}

3087 | |

// Asymptotic two-sample K-S P-value via the Kolmogorov distribution,
// scaling |D| by the effective sample size sqrt(N * N' / (N + N')).
private double ksPval(ulong N, ulong Nprime, double D)
in {
    assert(D >= -1);
    assert(D <= 1);
} body {
    immutable effectiveN = cast(double) (N * Nprime) / (N + Nprime);
    return 1 - kolmDist(sqrt(effectiveN) * abs(D));
}

3095 | |

// Asymptotic one-sample K-S P-value:  |D| scaled by sqrt(N), fed through
// the Kolmogorov distribution.
private double ksPval(ulong N, double D)
in {
    assert(D >= -1);
    assert(D <= 1);
} body {
    immutable scaled = sqrt(cast(double) N) * abs(D);
    return 1 - kolmDist(scaled);
}

3103 | |

/**Wald-wolfowitz or runs test for randomness of the distribution of
 * elements for which positive() evaluates to true.  For example, given
 * a sequence of coin flips [H,H,H,H,H,T,T,T,T,T] and a positive() function of
 * "a == 'H'", this test would determine that the heads are non-randomly
 * distributed, since they are all at the beginning of obs.  This is done
 * by counting the number of runs of consecutive elements for which
 * positive() evaluates to true, and the number of consecutive runs for which
 * it evaluates to false.  In the example above, we have 2 runs.  These are the
 * block of 5 consecutive heads at the beginning and the 5 consecutive tails
 * at the end.
 *
 * Alternatives are Alt.less, meaning that less runs than expected have been
 * observed and data for which positive() is true tends to cluster,
 * Alt.greater, which means that more runs than expected have been observed
 * and data for which positive() is true tends to not cluster even moreso than
 * expected by chance, and Alt.twoSided, meaning that elements for which
 * positive() is true cluster as much as expected by chance.
 *
 * Bugs:  No exact calculation of the P-value.  Asymptotic approximation only.
 *
 * References:  http://en.wikipedia.org/wiki/Runs_test
 *
 */
double runsTest(alias positive = "a > 0", T)(T obs, Alt alt = Alt.twoSided)
if(isIterable!(T)) {
    // Feed every observation into the online accumulator, then ask it for
    // the P-value.
    RunsTest!(positive, IterType!(T)) counter;
    foreach(observation; obs) {
        counter.put(observation);
    }
    return counter.p(alt);
}

3135 | |

unittest {
    // Values from R lawstat package, for which "a < median(data)" is
    // hard-coded as the equivalent to positive().  The median of this data
    // is 0.5, so everything works.
    immutable int[] data = [1,0,0,0,1,1,0,0,1,0,1,0,1,0,1,1,1,0,0,1].idup;

    // Two-sided and both one-sided alternatives against R's reference values.
    assert(approxEqual(runsTest(data), 0.3581));
    assert(approxEqual(runsTest(data, Alt.less), 0.821));
    assert(approxEqual(runsTest(data, Alt.greater), 0.1791));
}

3145 | |

3146 | /**Runs test as in runsTest(), except calculates online instead of from stored |

3147 | * array elements.*/ |

/**Runs test as in runsTest(), except calculates online instead of from stored
 * array elements.*/
struct RunsTest(alias positive = "a > 0", T) {
private:
    uint nPos;     // Count of elements for which positive() was true.
    uint nNeg;     // Count of elements for which positive() was false.
    uint nRun;     // Number of runs seen so far.
    bool lastPos;  // Whether the previous element was positive.

    alias unaryFun!(positive) pos;

public:

    /// Process the next element of the sequence.
    void put(T elem) {
        // BUG FIX:  Evaluate the predicate exactly once per element.  The
        // old code computed curPos and then called pos(elem) a second time
        // in the else-if branch, which is wasteful and could give
        // inconsistent results for an impure predicate.
        immutable bool curPos = pos(elem);

        if(nRun == 0) {
            // First element starts the first run.
            nRun = 1;
            if(curPos) {
                nPos++;
            } else {
                nNeg++;
            }
        } else if(curPos) {
            nPos++;
            if(!lastPos) {
                nRun++;
            }
        } else {
            nNeg++;
            if(lastPos) {
                nRun++;
            }
        }
        lastPos = curPos;
    }

    /// Number of runs observed so far.
    uint nRuns() {
        return nRun;
    }

    /// Asymptotic P-value for the given alternative hypothesis.
    double p(Alt alt = Alt.twoSided) {
        immutable uint N = nPos + nNeg;

        // Mean and standard deviation of the number of runs under the
        // null hypothesis of a random arrangement.
        immutable double expected = 2.0 * nPos * nNeg / N + 1;
        immutable double sd = sqrt((expected - 1) * (expected - 2) / (N - 1));

        if(alt == Alt.less) {
            return normalCDF(nRun, expected, sd);
        } else if(alt == Alt.greater) {
            return normalCDFR(nRun, expected, sd);
        } else {
            return 2 * ((nRun < expected) ?
                   normalCDF(nRun, expected, sd) :
                   normalCDFR(nRun, expected, sd));
        }
    }
}

3204 | |

// Aliases for the old names of the correlation tests.  Kept so that code
// written against earlier releases continues to compile.
alias pearsonCorTest pcorTest;
alias spearmanCorTest scorTest;
alias kendallCorTest kcorTest;

3209 | |

3210 | /**Tests the hypothesis that the Pearson correlation between two ranges is |

3211 | * different from 0. Alternatives are |

3212 | * Alt.less (pearsonCor(range1, range2) < 0), Alt.greater (pearsonCor(range1, range2) |

3213 | * > 0) and Alt.twoSided (pearsonCor(range1, range2) != 0). |

3214 | * |

3215 | * Returns: A ConfInt of the estimated Pearson correlation of the two ranges, |

3216 | * the P-value against the given alternative, and the confidence interval of |

3217 | * the correlation at the level specified by confLevel. |

3218 | * |

3219 | * References: http://en.wikipedia.org/wiki/Pearson_correlation |

3220 | */ |

ConfInt pearsonCorTest(T, U)(T range1, U range2, Alt alt = Alt.twoSided, double confLevel = 0.95)
if(doubleInput!(T) && doubleInput!(U)) {
    enforceConfidence(confLevel);

    // Compute the correlation and sample size in one pass, then delegate
    // to the overload that works on pre-computed statistics.
    auto stats = pearsonCor(range1, range2);
    if(isNaN(stats.cor)) {
        return ConfInt.init;
    }

    return pearsonCorTest(stats.cor, stats.N, alt, confLevel);
}

3232 | |

3233 | /**Same as overload, but uses pre-computed correlation coefficient and sample |

3234 | * size instead of computing them. |

3235 | * |

3236 | * Note: T must be a numeric type. The only reason this is a template and |

3237 | * not a plain old function is DMD bug 2972. |

3238 | */ |

/**Same as overload, but uses pre-computed correlation coefficient and sample
 * size instead of computing them.
 *
 * Note:  T must be a numeric type.  The only reason this is a template and
 * not a plain old function is DMD bug 2972.
 */
ConfInt pearsonCorTest(T)(double cor, T N, Alt alt = Alt.twoSided, double confLevel = 0.95)
if(isNumeric!(T)) {
    dstatsEnforce(N >= 0, "N must be >= 0 for pearsonCorTest.");

    // BUG FIX:  The old error messages claimed the valid range was [0, 1],
    // but the checks (correctly) allow any correlation in [-1, 1].
    dstatsEnforce(cor > -1.0 || approxEqual(cor, -1.0),
        "Correlation must be between -1, 1.");
    dstatsEnforce(cor < 1.0 || approxEqual(cor, 1.0),
        "Correlation must be between -1, 1.");
    enforceConfidence(confLevel);

    // T statistic for H0: rho == 0, with N - 2 degrees of freedom.
    immutable double denom = sqrt((1 - cor * cor) / (N - 2));
    immutable double t = cor / denom;
    ConfInt ret;
    ret.testStat = cor;

    // Fisher z-transformation, used only for the confidence interval.
    double sqN, z;
    if(confLevel > 0) {
        sqN = sqrt(N - 3.0);
        z = sqN * atanh(cor);
    }

    final switch(alt) {
    case Alt.none :
        return ret;
    case Alt.twoSided:
        // |cor| == 1 gives an infinite t; handle it explicitly.
        ret.p = (abs(cor) >= 1) ? 0 :
            2 * ((t < 0) ? studentsTCDF(t, N - 2) : studentsTCDFR(t, N - 2));

        if(confLevel > 0) {
            double deltaZ = invNormalCDF(0.5 * (1 - confLevel));
            ret.lowerBound = tanh((z + deltaZ) / sqN);
            ret.upperBound = tanh((z - deltaZ) / sqN);
        } else {
            ret.lowerBound = cor;
            ret.upperBound = cor;
        }

        break;
    case Alt.less:
        if(cor >= 1) {
            ret.p = 1;
        } else if(cor <= -1) {
            ret.p = 0;
        } else {
            ret.p = studentsTCDF(t, N - 2);
        }

        if(confLevel > 0) {
            double deltaZ = invNormalCDF(1 - confLevel);
            ret.lowerBound = -1;
            ret.upperBound = tanh((z - deltaZ) / sqN);
        } else {
            ret.lowerBound = -1;
            ret.upperBound = cor;
        }

        break;
    case Alt.greater:
        if(cor >= 1) {
            ret.p = 0;
        } else if(cor <= -1) {
            ret.p = 1;
        } else {
            ret.p = studentsTCDFR(t, N - 2);
        }

        if(confLevel > 0) {
            double deltaZ = invNormalCDF(1 - confLevel);
            ret.lowerBound = tanh((z + deltaZ) / sqN);
            ret.upperBound = 1;
        } else {
            ret.lowerBound = cor;
            ret.upperBound = 1;
        }

        break;
    }
    return ret;
}

3317 | |

unittest {
    // Values from R.
    auto t1 = pearsonCorTest([1,2,3,4,5].dup, [2,1,4,3,5].dup, Alt.twoSided);
    auto t2 = pearsonCorTest([1,2,3,4,5].dup, [2,1,4,3,5].dup, Alt.less);
    auto t3 = pearsonCorTest([1,2,3,4,5].dup, [2,1,4,3,5].dup, Alt.greater);

    // The correlation estimate is the same regardless of the alternative.
    assert(approxEqual(t1.testStat, 0.8));
    assert(approxEqual(t2.testStat, 0.8));
    assert(approxEqual(t3.testStat, 0.8));

    assert(approxEqual(t1.p, 0.1041));
    assert(approxEqual(t2.p, 0.948));
    assert(approxEqual(t3.p, 0.05204));

    assert(approxEqual(t1.lowerBound, -0.2796400));
    assert(approxEqual(t3.lowerBound, -0.06438567));
    assert(approxEqual(t2.lowerBound, -1));

    assert(approxEqual(t1.upperBound, 0.9861962));
    assert(approxEqual(t2.upperBound, 0.9785289));
    assert(approxEqual(t3.upperBound, 1));

    // Test special case hack for cor = +- 1.
    uint[] myArr = [1,2,3,4,5];
    uint[] myArrReverse = myArr.dup;
    reverse(myArrReverse);

    // Perfect positive correlation.
    auto t4 = pearsonCorTest(myArr, myArr, Alt.twoSided);
    auto t5 = pearsonCorTest(myArr, myArr, Alt.less);
    auto t6 = pearsonCorTest(myArr, myArr, Alt.greater);
    assert(approxEqual(t4.testStat, 1));
    assert(t4.p == 0);
    assert(t5.p == 1);
    assert(t6.p == 0);

    // Perfect negative correlation.
    auto t7 = pearsonCorTest(myArr, myArrReverse, Alt.twoSided);
    auto t8 = pearsonCorTest(myArr, myArrReverse, Alt.less);
    auto t9 = pearsonCorTest(myArr, myArrReverse, Alt.greater);
    assert(approxEqual(t7.testStat, -1));
    assert(t7.p == 0);
    assert(t8.p == 0);
    assert(t9.p == 1);
}

3361 | |

3362 | /**Tests the hypothesis that the Spearman correlation between two ranges is |

3363 | * different from 0. Alternatives are |

3364 | * Alt.less (spearmanCor(range1, range2) < 0), Alt.greater (spearmanCor(range1, range2) |

3365 | * > 0) and Alt.twoSided (spearmanCor(range1, range2) != 0). |

3366 | * |

3367 | * Returns: A TestRes containing the Spearman correlation coefficient and |

3368 | * the P-value for the given alternative. |

3369 | * |

3370 | * Bugs: Exact P-value computation not yet implemented. Uses asymptotic |

3371 | * approximation only. This is good enough for most practical purposes given |

3372 | * reasonably large N, but is not perfectly accurate. Not valid for data with |

3373 | * very large amounts of ties. */ |

TestRes spearmanCorTest(T, U)(T range1, U range2, Alt alt = Alt.twoSided)
if(isInputRange!(T) && isInputRange!(U) &&
   is(typeof(range1.front < range1.front) == bool) &&
   is(typeof(range2.front < range2.front) == bool)) {

    // The P-value needs the number of observations.  If range1 doesn't
    // know its length, copy it into a temporary buffer first.
    static if(!dstats.base.hasLength!T) {
        auto r1 = tempdup(range1);
        scope(exit) TempAlloc.free();
    } else {
        alias range1 r1;
    }
    double N = r1.length;

    // BUG FIX:  Use r1 here, not range1.  If range1 was a non-forward
    // input range, tempdup() above has already consumed it, so reading
    // range1 again would see an empty/spent range.  In the hasLength
    // branch r1 is an alias for range1, so this is always correct.
    return pearsonCorTest(spearmanCor(r1, range2), N, alt, 0);
}

3389 | |

unittest {
    // Values from R.
    int[] arr1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20];
    int[] arr2 = [8,6,7,5,3,0,9,8,6,7,5,3,0,9,3,6,2,4,3,6,8];
    auto t1 = spearmanCorTest(arr1, arr2, Alt.twoSided);
    auto t2 = spearmanCorTest(arr1, arr2, Alt.less);
    auto t3 = spearmanCorTest(arr1, arr2, Alt.greater);

    // The rho estimate is identical for every alternative.
    assert(approxEqual(t1.testStat, -0.1769406));
    assert(approxEqual(t2.testStat, -0.1769406));
    assert(approxEqual(t3.testStat, -0.1769406));

    assert(approxEqual(t1.p, 0.4429));
    assert(approxEqual(t3.p, 0.7785));
    assert(approxEqual(t2.p, 0.2215));
}

3406 | |

3407 | /**Tests the hypothesis that the Kendall Tau-b between two ranges is |

3408 | * different from 0. Alternatives are |

3409 | * Alt.less (kendallCor(range1, range2) < 0), Alt.greater (kendallCor(range1, range2) |

3410 | * > 0) and Alt.twoSided (kendallCor(range1, range2) != 0). |

3411 | * |

3412 | * exactThresh controls the maximum length of the range for which exact P-value |

3413 | * computation is used. The default is 50. Exact calculation is never used |

3414 | * when ties are present because it is not computationally feasible. |

3415 | * Do not set this higher than 100, as it will be very slow |

3416 | * and the asymptotic approximation is pretty good at even a fraction of this |

3417 | * size. |

3418 | * |

3419 | * Returns: A TestRes containing the Kendall correlation coefficient and |

3420 | * the P-value for the given alternative. |

3421 | * |

3422 | * References: StackOverflow Question 948341 (http://stackoverflow.com/questions/948341) |

3423 | * |

3424 | * The Variance of Tau When Both Rankings Contain Ties. M.G. Kendall. |

3425 | * Biometrika, Vol 34, No. 3/4 (Dec., 1947), pp. 297-298 |

3426 | */ |

TestRes kendallCorTest(T, U)(T range1, U range2, Alt alt = Alt.twoSided, uint exactThresh = 50)
if(isInputRange!(T) && isInputRange!(U)) {
    mixin(newFrame);
    // Buffer both inputs; the low-level routine sorts them destructively.
    auto i1d = tempdup(range1);
    auto i2d = tempdup(range2);
    immutable res = kendallCorDestructiveLowLevel(i1d, i2d, true);
    immutable double n = i1d.length;

    // Variance of the statistic under the null with tie corrections,
    // per Kendall (1947) -- see the References section above.
    immutable double var =
        (2.0 / 9) * n * (n - 1) * (2 * n + 5)
        - (2.0 / 9) * res.tieCorrectT1
        - (2.0 / 9) * res.tieCorrectU1
        + (4 / (9 * n * (n - 1) * (n - 2))) * res.tieCorrectT2 * res.tieCorrectU2
        + 2 / (n * (n - 1)) * res.tieCorrectT3 * res.tieCorrectU3;

    // Need the / 2 to change C, as used in Kendall's paper to S, as used here.
    immutable double sd = sqrt(var) / 2;

    // Continuity correction, applied in the conservative direction
    // (see the discussion in the unittest below).
    enum double cc = 1;
    auto tau = res.tau;
    auto s = res.s;

    // Exact P-values are only feasible when neither input has ties.
    immutable bool noTies = res.tieCorrectT1 == 0 && res.tieCorrectU1 == 0;

    if(noTies && n <= exactThresh) {
        // More than uint.max data points for exact calculation is
        // not plausible.
        assert(i1d.length < uint.max);
        immutable N = cast(uint) i1d.length;
        // Convert the S statistic to a bubble-sort swap count.
        immutable nSwaps = (N * (N - 1) / 2 - cast(uint) s) / 2;
        return TestRes(tau, kendallCorExactP(N, nSwaps, alt));
    }

    // Fall back to the continuity-corrected normal approximation.
    final switch(alt) {
    case Alt.none :
        return TestRes(tau);
    case Alt.twoSided:
        if(abs(s) <= cc) {
            return TestRes(tau, 1);
        } else if(s < 0) {
            return TestRes(tau, 2 * normalCDF(s + cc, 0, sd));
        } else {
            assert(s > 0);
            return TestRes(tau, 2 * normalCDFR(s - cc, 0, sd));
        }
        assert(0);

    case Alt.less:
        return TestRes(tau, normalCDF(s + cc, 0, sd));
    case Alt.greater:
        return TestRes(tau, normalCDFR(s - cc, 0, sd));
    }
}

3480 | |

// Dynamic programming algorithm for computing exact Kendall tau P-values.
// Thanks to ShreevatsaR from StackOverflow.
private double kendallCorExactP(uint N, uint swaps, Alt alt) {
    uint maxSwaps = N * (N - 1) / 2;
    assert(swaps <= maxSwaps);
    immutable expectedSwaps = cast(ulong) N * (N - 1) * 0.25;
    // Exploit symmetry:  reduce every case to a lower-tail Alt.greater
    // computation before running the DP below.
    if(alt == Alt.greater) {
        if(swaps > expectedSwaps) {
            if(swaps == maxSwaps) {
                return 1;
            }
            return 1.0 - kendallCorExactP(N, maxSwaps - swaps - 1, Alt.greater);
        }
    } else if(alt == Alt.less) {
        if(swaps == 0) {
            return 1;
        }
        return kendallCorExactP(N, maxSwaps - swaps + 0, Alt.greater);
    } else if(alt == Alt.twoSided) {
        if(swaps < expectedSwaps) {
            return min(1, 2 * kendallCorExactP(N, swaps, Alt.greater));
        } else if(swaps > expectedSwaps) {
            return min(1, 2 * kendallCorExactP(N, swaps, Alt.less));
        } else {
            return 1;
        }
    } else { // Alt.none
        return double.nan;
    }

    /* This algorithm was obtained from Question 948341 on stackoverflow.com
     * and works as follows:
     *
     * swaps is the number of swaps that would be necessary in a bubble sort
     * to sort one list in the same order as the other.  N is the sample size.
     * We want to find the number of ways that we could get a bubble sort
     * distance of at least swaps, and divide it by the total number of
     * permutations, pElem.
     *
     * The number of swaps necessary to sort a list is equivalent to the
     * number of inversions in the list, i.e. where i > j, but
     * list[i] < list[j].  This is a bottom-up dynamic programming algorithm
     * based on this principle.
     *
     * Assume c(N, k) is the number of permutations that require <= swaps
     * inversions.
     * We use the recurrence relation:
     * When k <= N - 1, c(N,k) = c(N,k-1) + c(N-1,k)
     * When k >= N,     c(N,k) = c(N,k-1) + c(N-1,k) - c(N-1,k-N)
     *
     * We also divide every value by the constant N! to turn this count into a
     * probability.
     */

    immutable double pElem = exp(-logFactorial(N));
    double[] cur = newStack!double(swaps + 1);
    double[] prev = newStack!double(swaps + 1);

    prev[] = pElem;
    cur[0] = pElem;
    foreach(k; 1..N + 1) {
        // A prefix of length k can contain at most k*(k-1)/2 inversions.
        immutable uint nSwapsPossible = k * (k - 1) / 2;
        immutable uint upTo = min(swaps, nSwapsPossible) + 1;
        foreach(j; 1..upTo) {
            if(j < k) {
                cur[j] = prev[j] + cur[j - 1];
            } else {
                cur[j] = prev[j] - prev[j - k] + cur[j - 1];
            }
        }
        cur[upTo..$] = cur[upTo - 1];
        swap(cur, prev);
    }
    TempAlloc.free;
    TempAlloc.free;
    return prev[$ - 1];
}

3558 | |

unittest {
    /* Values from R, with continuity correction enabled.  Note that large
     * one-sided inexact P-values are commented out because R seems to have a
     * slightly different interpretation of the proper continuity correction
     * than this library.  This library corrects the z-score in the direction
     * that would make the test more conservative.  R corrects towards zero.
     * I can't find a reference to support either one, but empirically it seems
     * like correcting towards more conservative results more accurately mirrors
     * the results of the exact test.  This isn't too big a deal anyhow since:
     *
     * 1.  The difference is small.
     * 2.  It only occurs on results that are very far from significance
     *     (P > 0.5).
     */
    int[] arr1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20];
    int[] arr2 = [8,6,7,5,3,0,9,8,6,7,5,3,0,9,3,6,2,4,3,6,8];
    auto t1 = kendallCorTest(arr1, arr2, Alt.twoSided);
    auto t2 = kendallCorTest(arr1, arr2, Alt.less);
    auto t3 = kendallCorTest(arr1, arr2, Alt.greater);

    // Tau-b estimate is identical for every alternative.
    assert(approxEqual(t1.testStat, -.1448010));
    assert(approxEqual(t2.testStat, -.1448010));
    assert(approxEqual(t3.testStat, -.1448010));

    assert(approxEqual(t1.p, 0.3923745));
    //assert(approxEqual(t3.p, 0.8038127));
    assert(approxEqual(t2.p, 0.1961873));

    // Now, test the case of ties in both arrays.
    arr1 = [1,1,1,2,2,3,4,5,5,6];
    arr2 = [1,1,2,3,4,5,5,5,5,6];
    assert(approxEqual(kendallCorTest(arr1, arr2, Alt.twoSided).p, 0.001216776));
    //assert(approxEqual(kendallCorTest(arr1, arr2, Alt.less).p, 0.9993916));
    assert(approxEqual(kendallCorTest(arr1, arr2, Alt.greater).p, 0.0006083881));

    arr1 = [1,1,1,2,2,2,3,3,3,4,4,4,5,5,5];
    arr2 = [1,1,1,3,3,3,2,2,2,5,5,5,4,4,4];
    assert(approxEqual(kendallCorTest(arr1, arr2).p, 0.006123));

    // Test the exact stuff.  Still using values from R.
    uint[] foo = [1,2,3,4,5];
    uint[] bar = [1,2,3,5,4];
    uint[] baz = [5,3,1,2,4];

    assert(approxEqual(kendallCorTest(foo, foo).p, 0.01666666));
    assert(approxEqual(kendallCorTest(foo, foo, Alt.greater).p, 0.008333333));
    assert(approxEqual(kendallCorTest(foo, foo, Alt.less).p, 1));

    assert(approxEqual(kendallCorTest(foo, bar).p, 0.083333333));
    assert(approxEqual(kendallCorTest(foo, bar, Alt.greater).p, 0.041666667));
    assert(approxEqual(kendallCorTest(foo, bar, Alt.less).p, 0.9917));

    assert(approxEqual(kendallCorTest(foo, baz).p, 0.8167));
    assert(approxEqual(kendallCorTest(foo, baz, Alt.greater).p, 0.7583));
    assert(approxEqual(kendallCorTest(foo, baz, Alt.less).p, .4083));

    assert(approxEqual(kendallCorTest(bar, baz).p, 0.4833));
    assert(approxEqual(kendallCorTest(bar, baz, Alt.greater).p, 0.8833));
    assert(approxEqual(kendallCorTest(bar, baz, Alt.less).p, 0.2417));

    // A little monte carlo unittesting.  For large ranges, the deviation
    // between the exact and approximate version should be extremely small.
    foreach(i; 0..100) {
        uint nToTake = uniform(15, 65);
        auto lhs = toArray(take(randRange!rNorm(0, 1), nToTake));
        auto rhs = toArray(take(randRange!rNorm(0, 1), nToTake));
        if(i & 1) {
            lhs[] += rhs[] * 0.2;  // Make sure there's some correlation.
        } else {
            lhs[] -= rhs[] * 0.2;
        }
        // exactThresh = 0 forces the asymptotic approximation.
        double exact = kendallCorTest(lhs, rhs).p;
        double approx = kendallCorTest(lhs, rhs, Alt.twoSided, 0).p;
        assert(abs(exact - approx) < 0.01);

        exact = kendallCorTest(lhs, rhs, Alt.greater).p;
        approx = kendallCorTest(lhs, rhs, Alt.greater, 0).p;
        assert(abs(exact - approx) < 0.01);

        exact = kendallCorTest(lhs, rhs, Alt.less).p;
        approx = kendallCorTest(lhs, rhs, Alt.less, 0).p;
        assert(abs(exact - approx) < 0.01);
    }
}

3643 | |

3644 | /**A test for normality of the distribution of a range of values. Based on |

3645 | * the assumption that normally distributed values will have a sample skewness |

3646 | * and sample kurtosis very close to zero. |

3647 | * |

3648 | * Returns: A TestRes with the K statistic, which is Chi-Square distributed |

3649 | * with 2 degrees of freedom under the null, and the P-value for the alternative |

3650 | * that the data has skewness and kurtosis not equal to zero against the null |

3651 | * that skewness and kurtosis are near zero. A normal distribution always has |

3652 | * skewness and kurtosis that converge to zero as sample size goes to infinity. |

3653 | * |

3654 | * Notes: Contrary to popular belief, tests for normality should usually |

3655 | * not be used to determine whether T-tests are valid. If the sample size is |

3656 | * large, T-tests are valid regardless of the distribution due to the central |

3657 | * limit theorem. If the sample size is small, a test for normality will |

3658 | * likely not be very powerful, and a priori knowledge or simple inspection |

3659 | * of the data is often a better idea. |

3660 | * |

3661 | * References: |

3662 | * D'Agostino, Ralph B., Albert Belanger, and Ralph B. D'Agostino, Jr. |

3663 | * "A Suggestion for Using Powerful and Informative Tests of Normality", |

3664 | * The American Statistician, Vol. 44, No. 4. (Nov., 1990), pp. 316-321. |

3665 | */ |

TestRes dAgostinoK(T)(T range)
if(doubleIterable!(T)) {
    // You are not meant to understand this.  I sure don't.  I just implemented
    // these formulas off of Wikipedia, which got them from:

    // D'Agostino, Ralph B., Albert Belanger, and Ralph B. D'Agostino, Jr.
    // "A Suggestion for Using Powerful and Informative Tests of Normality",
    // The American Statistician, Vol. 44, No. 4. (Nov., 1990), pp. 316-321.

    // Amazing.  I didn't even realize things this complicated were possible
    // in 1990, before widespread computer algebra systems.

    // Notation from Wikipedia.  Keeping same notation for simplicity.
    double sqrtb1 = void, b2 = void, n = void;
    {
        auto summ = summary(range);
        sqrtb1 = summ.skewness;
        // The + 3 converts the excess kurtosis reported by summary()
        // into the raw kurtosis b2 used by the published formulas.
        b2 = summ.kurtosis + 3;
        n = summ.N;
    }

    // Calculate transformed skewness.
    double Y = sqrtb1 * sqrt((n + 1) * (n + 3) / (6 * (n - 2)));
    double beta2b1Numer = 3 * (n * n + 27 * n - 70) * (n + 1) * (n + 3);
    double beta2b1Denom = (n - 2) * (n + 5) * (n + 7) * (n + 9);
    double beta2b1 = beta2b1Numer / beta2b1Denom;
    double Wsq = -1 + sqrt(2 * (beta2b1 - 1));
    double delta = 1.0 / sqrt(log(sqrt(Wsq)));
    double alpha = sqrt( 2.0 / (Wsq - 1));
    double Zb1 = delta * log(Y / alpha + sqrt(pow(Y / alpha, 2) + 1));

    // Calculate transformed kurtosis.
    double Eb2 = 3 * (n - 1) / (n + 1);
    double sigma2b2 = (24 * n * (n - 2) * (n - 3)) / (
        (n + 1) * (n + 1) * (n + 3) * (n + 5));
    double x = (b2 - Eb2) / sqrt(sigma2b2);

    double sqBeta1b2 = 6 * (n * n - 5 * n + 2) / ((n + 7) * (n + 9)) *
        sqrt((6 * (n + 3) * (n + 5)) / (n * (n - 2) * (n - 3)));
    double A = 6 + 8 / sqBeta1b2 * (2 / sqBeta1b2 + sqrt(1 + 4 / (sqBeta1b2 * sqBeta1b2)));
    double Zb2 = ((1 - 2 / (9 * A)) -
        cbrt((1 - 2 / A) / (1 + x * sqrt(2 / (A - 4)))) ) *
        sqrt(9 * A / 2);

    // K^2 combines the two transformed statistics; it is tested against a
    // chi-square distribution with 2 degrees of freedom below.
    double K2 = Zb1 * Zb1 + Zb2 * Zb2;

    if(isNaN(K2)) {
        return TestRes(double.nan, double.nan);
    }

    return TestRes(K2, chiSquareCDFR(K2, 2));
}

3718 | |

unittest {
    // Values from R's fBasics package.
    int[] arr1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20];
    int[] arr2 = [8,6,7,5,3,0,9,8,6,7,5,3,0,9,3,6,2,4,3,6,8];

    auto r1 = dAgostinoK(arr1);
    auto r2 = dAgostinoK(arr2);

    // Check both the K^2 statistic and the P-value against R.
    assert(approxEqual(r1.testStat, 3.1368));
    assert(approxEqual(r1.p, 0.2084));

    assert(approxEqual(r2.testStat, 1.1816));
    assert(approxEqual(r2.p, 0.5539));
}

3733 | |

3734 | /**Fisher's method of meta-analyzing a set of P-values to determine whether |

3735 | * there are more significant results than would be expected by chance. |

3736 | * Based on a chi-square statistic for the sum of the logs of the P-values. |

3737 | * |

3738 | * Returns: A TestRes containing the chi-square statistic and a P-value for |

3739 | * the alternative hypothesis that more small P-values than would be expected |

3740 | * by chance are present against the null hypothesis that the distribution of |

3741 | * P-values is uniform or enriched for large P-values. |

3742 | * |

3743 | * References: Fisher, R. A. (1948) "Combining independent tests of |

3744 | * significance", American Statistician, vol. 2, issue 5, page 30. |

3745 | * (In response to Question 14) |

3746 | */ |

TestRes fishersMethod(R)(R pVals)
if(doubleInput!R) {
    // Fisher's statistic is -2 * sum(log(p)); under the null it is
    // chi-square distributed with two degrees of freedom per P-value.
    double logSum = 0;
    uint degreesOfFreedom = 0;

    foreach(pVal; pVals) {
        dstatsEnforce(pVal >= 0 && pVal <= 1,
            "P-values must be between 0, 1 for Fisher's Method.");
        logSum += log(cast(double) pVal);
        degreesOfFreedom += 2;
    }

    immutable double chiSq = logSum * -2;
    return TestRes(chiSq, chiSquareCDFR(chiSq, degreesOfFreedom));
}

3760 | |

unittest {
    // First, basic sanity check.  Make sure w/ one P-value, we get back that
    // P-value.
    for(double p = 0.01; p < 1; p += 0.01) {
        assert(approxEqual(fishersMethod([p].dup).p, p));
    }

    // Combining several P-values:  check the chi-square statistic and that
    // the combined result is strongly significant.
    float[] ps = [0.739, 0.0717, 0.01932, 0.03809];
    auto res = fishersMethod(ps);
    assert(approxEqual(res.testStat, 20.31));
    assert(res.p < 0.01);
}

3772 | |

/// For falseDiscoveryRate:  selects between the Benjamini-Hochberg and
/// Benjamini-Yekutieli procedures.
enum Dependency {
    /// Assume that dependency among hypotheses may exist.  (More conservative.)
    yes,

    /// Assume hypotheses are independent.  (Less conservative.)
    no,

    // Kept for compatibility with old style, intentionally not documented,
    // may eventually be removed.
    TRUE = yes,
    FALSE = no
}

3786 | |

3787 | /**Computes the false discovery rate statistic given a list of |

3788 | * p-values, according to Benjamini and Hochberg (1995) (independent) or |

3789 | * Benjamini and Yekutieli (2001) (dependent). The Dependency parameter |

3790 | * controls whether hypotheses are assumed to be independent, or whether |

3791 | * the more conservative assumption that they are correlated must be made. |

3792 | * |

3793 | * Returns: |

3794 | * An array of adjusted P-values with indices corresponding to the order of |

3795 | * the P-values in the input data. |

3796 | * |

3797 | * References: |

3798 | * Benjamini, Y., and Hochberg, Y. (1995). Controlling the false discovery rate: |

3799 | * a practical and powerful approach to multiple testing. Journal of the Royal |

3800 | * Statistical Society Series B, 57, 289-200 |

3801 | * |

3802 | * Benjamini, Y., and Yekutieli, D. (2001). The control of the false discovery |

3803 | * rate in multiple testing under dependency. Annals of Statistics 29, 1165-1188. |

3804 | */ |

float[] falseDiscoveryRate(T)(T pVals, Dependency dep = Dependency.no)
if(doubleInput!(T)) {
    // Work on a float copy so the caller's data is left untouched.
    auto qVals = array(map!(to!float)(pVals));

    // Benjamini-Yekutieli correction factor c(m) = sum_{i=1}^{m} 1/i;
    // for the independent (Benjamini-Hochberg) case C stays 1.
    double C = 1;
    if(dep == Dependency.yes) {
        foreach(i; 2..qVals.length + 1) {
            C += 1.0 / i;
        }
    }

    mixin(newFrame);
    auto perm = newStack!(size_t)(qVals.length);
    foreach(i, ref elem; perm) {
        elem = i;
    }

    // Sort P-values ascending, recording original positions in perm.
    qsort(qVals, perm);

    // Raw step-up adjustment:  q_(i) = p_(i) * C * m / i  (1-based rank i),
    // capped at 1.
    foreach(i, ref q; qVals) {
        q = min(1.0f, q * C * cast(double) qVals.length / (cast(double) i + 1));
    }

    // Enforce monotonicity from the largest adjusted value downward.
    float smallestSeen = float.max;
    foreach(ref q; retro(qVals)) {
        if(q < smallestSeen) {
            smallestSeen = q;
        } else {
            q = smallestSeen;
        }
    }

    qsort(perm, qVals);  //Makes order of qVals correspond to input.
    return qVals;
}

3840 | |

unittest {
    // Comparing results to R.

    // Independent (Benjamini-Hochberg) case.
    auto pVals = [.90, .01, .03, .03, .70, .60, .01].dup;
    auto qVals = falseDiscoveryRate(pVals);
    alias approxEqual ae;
    assert(ae(qVals[0], .9));
    assert(ae(qVals[1], .035));
    assert(ae(qVals[2], .052));
    assert(ae(qVals[3], .052));
    assert(ae(qVals[4], .816666666667));
    assert(ae(qVals[5], .816666666667));
    assert(ae(qVals[6], .035));

    auto p2 = [.1, .02, .6, .43, .001].dup;
    auto q2 = falseDiscoveryRate(p2);
    assert(ae(q2[0], .16666666));
    assert(ae(q2[1], .05));
    assert(ae(q2[2], .6));
    assert(ae(q2[3], .5375));
    assert(ae(q2[4], .005));

    // Dependent case.
    qVals = falseDiscoveryRate(pVals, Dependency.TRUE);
    assert(ae(qVals[0], 1));
    assert(ae(qVals[1], .09075));
    assert(ae(qVals[2], .136125));
    assert(ae(qVals[3], .136125));
    assert(ae(qVals[4], 1));
    assert(ae(qVals[5], 1));
    assert(ae(qVals[6], .09075));

    q2 = falseDiscoveryRate(p2, Dependency.TRUE);
    assert(ae(q2[0], .38055555));
    assert(ae(q2[1], .1141667));
    assert(ae(q2[2], 1));
    assert(ae(q2[3], 1));
    assert(ae(q2[4], .01141667));
}

3879 | |

3880 | /**Uses the Hochberg procedure to control the familywise error rate assuming |

3881 | * that hypothesis tests are independent. This is more powerful than |

3882 | * Holm-Bonferroni correction, but requires the independence assumption. |

3883 | * |

3884 | * Returns: |

3885 | * An array of adjusted P-values with indices corresponding to the order of |

3886 | * the P-values in the input data. |

3887 | * |

3888 | * References: |

3889 | * Hochberg, Y. (1988). A sharper Bonferroni procedure for multiple tests of |

3890 | * significance. Biometrika, 75, 800-803. |

3891 | */ |

float[] hochberg(T)(T pVals)
if(doubleInput!(T)) {
    // Work on a float copy so the caller's data is left untouched.
    auto adjusted = array(map!(to!float)(pVals));

    mixin(newFrame);
    auto order = newStack!(size_t)(adjusted.length);
    foreach(i, ref elem; order) {
        elem = i;
    }

    // Sort ascending, remembering each P-value's original position.
    qsort(adjusted, order);

    // Step-up adjustment:  the i-th smallest P-value is scaled by the
    // number of hypotheses minus its zero-based rank, capped at 1.
    foreach(i, ref q; adjusted) {
        dstatsEnforce(q >= 0 && q <= 1,
            "P-values must be between 0, 1 for hochberg.");
        q = min(1.0f, q * (cast(double) adjusted.length - i));
    }

    // Enforce monotonicity from the largest adjusted value downward.
    float runningMin = float.max;
    foreach_reverse(ref q; adjusted) {
        if(q < runningMin) {
            runningMin = q;
        } else {
            q = runningMin;
        }
    }

    qsort(order, adjusted);  // Restore the original input ordering.
    return adjusted;
}

3921 | |

unittest {
    alias approxEqual ae;

    // Expected values computed with R's p.adjust(..., method = "hochberg").
    auto adj = hochberg([0.01, 0.02, 0.025, 0.9].dup);
    assert(ae(adj[0], 0.04));
    assert(ae(adj[1], 0.05));
    assert(ae(adj[2], 0.05));
    assert(ae(adj[3], 0.9));

    auto pInput = [.1, .02, .6, .43, .001].dup;
    auto adj2 = hochberg(pInput);
    assert(ae(adj2[0], .3));
    assert(ae(adj2[1], .08));
    assert(ae(adj2[2], .6));
    assert(ae(adj2[3], .6));
    assert(ae(adj2[4], .005));
}

3938 | |

/**Uses the Holm-Bonferroni method to adjust a set of P-values in a way that
 * controls the familywise error rate (the probability of making at least one
 * Type I error).  It is essentially a less conservative form of Bonferroni
 * correction that remains valid under arbitrary assumptions while still
 * controlling the familywise error rate, so there are few good reasons to
 * prefer plain Bonferroni correction over it.
 *
 * Returns:
 * An array of adjusted P-values whose indices correspond to the order of
 * the P-values in the input data.
 *
 * References:
 * Holm, S. (1979). A simple sequentially rejective multiple test procedure.
 * Scandinavian Journal of Statistics, 6, 65-70.
 */
float[] holmBonferroni(T)(T pVals)
if(doubleInput!(T)) {
    mixin(newFrame);

    auto adjusted = array(map!(to!float)(pVals));

    // Remember original positions so results can be un-permuted at the end.
    auto order = newStack!(size_t)(adjusted.length);
    foreach(pos, ref index; order) {
        index = pos;
    }

    // Sort ascending, permuting the position array in parallel.
    qsort(adjusted, order);

    // Scale each sorted P-value by its step-down multiplier, capping at 1.
    foreach(rank, ref p; adjusted) {
        dstatsEnforce(p >= 0 && p <= 1,
            "P-values must be between 0, 1 for holmBonferroni.");
        p = min(1.0, p * (cast(double) adjusted.length - rank));
    }

    // Enforce monotonicity via a running maximum over the sorted values.
    foreach(i; 1..adjusted.length) {
        adjusted[i] = max(adjusted[i], adjusted[i - 1]);
    }

    qsort(order, adjusted);  // Restore the caller's original ordering.
    return adjusted;
}

3982 | |

unittest {
    alias approxEqual ae;

    // Expected values computed with R's p.adjust(..., method = "holm").
    auto adj = holmBonferroni([0.001, 0.2, 0.3, 0.4, 0.7].dup);
    assert(ae(adj[0], 0.005));
    assert(ae(adj[1], 0.8));
    assert(ae(adj[2], 0.9));
    assert(ae(adj[3], 0.9));
    assert(ae(adj[4], 0.9));

    // Ties and an unsorted input exercise the un-permutation step.
    adj = holmBonferroni([0.3, 0.1, 0.4, 0.1, 0.5, 0.9].dup);
    assert(adj == [1f, 0.6f, 1f, 0.6f, 1f, 1f]);
}

3996 | |

3997 | |

// Sanity check that no TempAlloc memory was leaked by any unittest in this
// module: all stack-allocated frames should be released by now.  This must
// remain the last unittest of the module.
unittest {
    auto state = TempAlloc.getState;
    assert(state.used == 0);
    assert(state.nblocks < 2);
}

**Note:** See TracBrowser for help on using the browser.