<?xml version="1.0" encoding="UTF-8"?>
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns="http://purl.org/rss/1.0/" xmlns:dc="http://purl.org/dc/elements/1.1/">
  <channel rdf:about="https://community.wolfram.com">
    <title>Community RSS Feed</title>
    <link>https://community.wolfram.com</link>
    <description>RSS Feed for Wolfram Community showing any discussions tagged with Operations Research sorted by active.</description>
    <items>
      <rdf:Seq>
        <rdf:li rdf:resource="https://community.wolfram.com/groups/-/m/t/3646989" />
        <rdf:li rdf:resource="https://community.wolfram.com/groups/-/m/t/3461424" />
        <rdf:li rdf:resource="https://community.wolfram.com/groups/-/m/t/3419210" />
        <rdf:li rdf:resource="https://community.wolfram.com/groups/-/m/t/3271465" />
        <rdf:li rdf:resource="https://community.wolfram.com/groups/-/m/t/3260391" />
        <rdf:li rdf:resource="https://community.wolfram.com/groups/-/m/t/3245846" />
        <rdf:li rdf:resource="https://community.wolfram.com/groups/-/m/t/944004" />
        <rdf:li rdf:resource="https://community.wolfram.com/groups/-/m/t/3212029" />
        <rdf:li rdf:resource="https://community.wolfram.com/groups/-/m/t/3209918" />
        <rdf:li rdf:resource="https://community.wolfram.com/groups/-/m/t/3204807" />
        <rdf:li rdf:resource="https://community.wolfram.com/groups/-/m/t/3149566" />
        <rdf:li rdf:resource="https://community.wolfram.com/groups/-/m/t/3055705" />
        <rdf:li rdf:resource="https://community.wolfram.com/groups/-/m/t/2920790" />
        <rdf:li rdf:resource="https://community.wolfram.com/groups/-/m/t/3050770" />
        <rdf:li rdf:resource="https://community.wolfram.com/groups/-/m/t/3017901" />
        <rdf:li rdf:resource="https://community.wolfram.com/groups/-/m/t/2997003" />
        <rdf:li rdf:resource="https://community.wolfram.com/groups/-/m/t/2987807" />
        <rdf:li rdf:resource="https://community.wolfram.com/groups/-/m/t/2938862" />
        <rdf:li rdf:resource="https://community.wolfram.com/groups/-/m/t/220551" />
        <rdf:li rdf:resource="https://community.wolfram.com/groups/-/m/t/193644" />
      </rdf:Seq>
    </items>
  </channel>
  <item rdf:about="https://community.wolfram.com/groups/-/m/t/3646989">
    <title>Salvo combat modeling: Battle of Coronel</title>
    <link>https://community.wolfram.com/groups/-/m/t/3646989</link>
    <description>![Salvo Combat Modeling: Battle of Coronel][1]&#xD;
&#xD;
&amp;amp;[Wolfram Notebook][2]&#xD;
&#xD;
&#xD;
  [1]: https://community.wolfram.com//c/portal/getImageAttachment?filename=SalvocombatmodelingBattleofCoronel.png&amp;amp;userId=20103&#xD;
  [2]: https://www.wolframcloud.com/obj/3a7678cb-0540-4a1d-85d8-793ac90dbe65</description>
    <dc:creator>Anton Antonov</dc:creator>
    <dc:date>2026-03-01T18:18:51Z</dc:date>
  </item>
  <item rdf:about="https://community.wolfram.com/groups/-/m/t/3461424">
    <title>How to speed up numerical calculations of the inverse method in game theory?</title>
    <link>https://community.wolfram.com/groups/-/m/t/3461424</link>
    <description>Hi, guys  &#xD;
I would like to know how to speed up numerical calculations of the inverse method in a game theoretical model. Thank you very much!&#xD;
&#xD;
    Clear[&amp;#034;`*&amp;#034;];&#xD;
    (*Parameter Definition*)a = 10;&#xD;
    c = {1, 2, 3, 8};&#xD;
    Kvals = {4, 4, 2, 1};&#xD;
    (*The fourth person&amp;#039;s best response*)&#xD;
    q4Opt[q1_?NumericQ, q2_?NumericQ, q3_?NumericQ] := &#xD;
     Module[{q4}, &#xD;
      q4 /. FindMaximum[{(a - (q1 + q2 + q3 + q4)) q4 - c[[4]] q4, &#xD;
          0 &amp;lt;= q4 &amp;lt;= Kvals[[4]]}, {q4, 5}][[2]]]&#xD;
    &#xD;
    (*The third person&amp;#039;s best response*)&#xD;
    q3Opt[q1_?NumericQ, q2_?NumericQ] := &#xD;
     Module[{q3}, &#xD;
      q3 /. FindMaximum[{(a - (q1 + q2 + q3 + q4Opt[q1, q2, q3])) q3 - &#xD;
           c[[3]] q3, 0 &amp;lt;= q3 &amp;lt;= Kvals[[3]]}, {q3, 10}][[2]]]&#xD;
    &#xD;
    (*The second person&amp;#039;s best response*)&#xD;
    q2Opt[q1_?NumericQ] := &#xD;
     Module[{q2}, &#xD;
      q2 /. FindMaximum[{(a - (q1 + q2 + q3Opt[q1, q2] + &#xD;
                q4Opt[q1, q2, q3Opt[q1, q2]])) q2 - c[[2]] q2, &#xD;
          0 &amp;lt;= q2 &amp;lt;= Kvals[[2]]}, {q2, 12.5}][[2]]]&#xD;
    &#xD;
    (*The first person&amp;#039;s best decision*)&#xD;
    q1Opt = Module[{q1}, &#xD;
       q1 /. FindMaximum[{(a - (q1 + q2Opt[q1] + q3Opt[q1, q2Opt[q1]] + &#xD;
                 q4Opt[q1, q2Opt[q1], q3Opt[q1, q2Opt[q1]]])) q1 - &#xD;
            c[[1]] q1, 0 &amp;lt;= q1 &amp;lt;= Kvals[[1]]}, {q1, 15}][[2]]];&#xD;
    &#xD;
    (*Output*)&#xD;
    Print[&amp;#034;Optimal decisions:&amp;#034;];&#xD;
    Print[&amp;#034;q1 = &amp;#034;, q1Opt];&#xD;
    q2Val = q2Opt[q1Opt];&#xD;
    Print[&amp;#034;q2 = &amp;#034;, q2Val];&#xD;
    q3Val = q3Opt[q1Opt, q2Val];&#xD;
    Print[&amp;#034;q3 = &amp;#034;, q3Val];&#xD;
    q4Val = q4Opt[q1Opt, q2Val, q3Val];&#xD;
    Print[&amp;#034;q4 = &amp;#034;, q4Val];</description>
    <dc:creator>yanshao Wang</dc:creator>
    <dc:date>2025-05-15T14:47:04Z</dc:date>
  </item>
  <item rdf:about="https://community.wolfram.com/groups/-/m/t/3419210">
    <title>Can I embed an inner loop (rather long) in a function that I can then optimize numerically?</title>
    <link>https://community.wolfram.com/groups/-/m/t/3419210</link>
    <description>Hello!  &#xD;
I&amp;#039;m not an expert in Mathematica.&#xD;
&#xD;
I&amp;#039;ve tried brute-force inner loop-outer loop grid search for an optimization exercise for a rather rich problem: the inner loop solves a problem that involves solving a number of smaller problems for a (potentially large) sample and performing operations on the results of said smaller problems.&#xD;
&#xD;
I would like to automatize the outer loop for it to use numerical optimization (eg NMinimize) to find a global optimum. For that, I need NMinimize (or alternative) to work with an objective function that calls the inner loop and gets, as return, the scalar value of the objective function for a given set of values for the choice variables.&#xD;
&#xD;
I&amp;#039;m not sure about the right/best way to do this. I&amp;#039;ve tried the attached code below for a much simpler inner loop and it does work. I will try to now embed the much longer inner loop of the full problem following this idea but any ideas to improve upon it would be much appreciated. Thanks very much for your attention!&#xD;
&#xD;
&amp;amp;[Wolfram Notebook][1]&#xD;
&#xD;
&#xD;
  [1]: https://www.wolframcloud.com/obj/dda13b85-129c-4223-a51d-b7148dfd7853</description>
    <dc:creator>Paco Martín</dc:creator>
    <dc:date>2025-03-18T13:27:49Z</dc:date>
  </item>
  <item rdf:about="https://community.wolfram.com/groups/-/m/t/3271465">
    <title>Polytopality of simple games</title>
    <link>https://community.wolfram.com/groups/-/m/t/3271465</link>
    <description>![Re-triangulation of a disk on a Bier sphere. Radial variation of vertices. Pentagonal cycle and its dual, the Möbius band. Polytopality of simple games][1]&#xD;
&#xD;
&amp;amp;[Wolfram Notebook][2]&#xD;
&#xD;
&#xD;
  [1]: https://community.wolfram.com//c/portal/getImageAttachment?filename=5511HeroImage.png&amp;amp;userId=20103&#xD;
  [2]: https://www.wolframcloud.com/obj/b35fcd6d-7929-4e4f-9126-922010cc38a9</description>
    <dc:creator>Marinko Timotijević</dc:creator>
    <dc:date>2024-09-10T19:38:27Z</dc:date>
  </item>
  <item rdf:about="https://community.wolfram.com/groups/-/m/t/3260391">
    <title>A novel real-time calculus for arbitrary job patterns and deadlines</title>
    <link>https://community.wolfram.com/groups/-/m/t/3260391</link>
    <description>![A novel real-time calculus for arbitrary job patterns and deadlines][1]&#xD;
&#xD;
&amp;amp;[Wolfram Notebook][2]&#xD;
&#xD;
&#xD;
  [1]: https://community.wolfram.com//c/portal/getImageAttachment?filename=4235Hero.png&amp;amp;userId=20103&#xD;
  [2]: https://www.wolframcloud.com/obj/f84c0e8d-6308-4aa0-8354-fdcbcbbda168</description>
    <dc:creator>Feras Fattohi</dc:creator>
    <dc:date>2024-08-29T18:33:56Z</dc:date>
  </item>
  <item rdf:about="https://community.wolfram.com/groups/-/m/t/3245846">
    <title>M/M/c retrial queue implementation in Wolfram Language</title>
    <link>https://community.wolfram.com/groups/-/m/t/3245846</link>
    <description>Looking for a Mathematica implementation of the M/M/c retrial queueing system</description>
    <dc:creator>Goran Djuknic</dc:creator>
    <dc:date>2024-08-11T05:02:40Z</dc:date>
  </item>
  <item rdf:about="https://community.wolfram.com/groups/-/m/t/944004">
    <title>ROC for classifier ensembles, bootstrapping, damaging, and interpolation</title>
    <link>https://community.wolfram.com/groups/-/m/t/944004</link>
    <description># Introduction&#xD;
&#xD;
The main goals of this post/document are:&#xD;
&#xD;
**i)** to demonstrate how to create versions and combinations of classifiers utilizing different perspectives,&#xD;
&#xD;
**ii)** to apply the Receiver Operating Characteristic (ROC) technique into evaluating the created classifiers (see \[[2](https://en.wikipedia.org/wiki/Receiver_operating_characteristic),[3](https://ccrma.stanford.edu/workshops/mir2009/references/ROCintro.pdf)\]) and &#xD;
&#xD;
**iii)** to illustrate the use of the *Mathematica* packages \[[5](https://github.com/antononcube/MathematicaForPrediction/blob/master/ROCFunctions.m),[6](https://github.com/antononcube/MathematicaForPrediction/blob/master/ClassifierEnsembles.m)\]. &#xD;
&#xD;
The concrete steps taken are the following:&#xD;
&#xD;
1. Obtain data: *Mathematica* built-in or external. Do some rudimentary analysis.&#xD;
&#xD;
2. Create an ensemble of classifiers and compare its performance to the individual classifiers in the ensemble.&#xD;
&#xD;
3. Produce classifier versions from changed data in order to explore the effect of record outliers.&#xD;
&#xD;
4. Make a bootstrapping classifier ensemble and evaluate and compare its performance.&#xD;
&#xD;
5. Systematically diminish the training data and evaluate the results with ROC.&#xD;
&#xD;
6. Show how to do classifier interpolation utilizing ROC.&#xD;
&#xD;
In the steps above we skip the necessary preliminary data analysis. For the datasets we use in this document that analysis has been done elsewhere. (See [,,,].) Nevertheless, since ROC is mostly used for binary classifiers we want to analyze the class labels distributions in the datasets in order to designate which class labels are &amp;#034;positive&amp;#034; and which are &amp;#034;negative.&amp;#034;&#xD;
&#xD;
## ROC plots evaluation (in brief)&#xD;
&#xD;
Assume we are given a binary classifier with the class labels ***P*** and ***N*** (for &amp;#034;positive&amp;#034; and &amp;#034;negative&amp;#034; respectively). &#xD;
&#xD;
Consider the following measures True Positive Rate (TPR):&#xD;
&#xD;
$$ TPR:= \frac {correctly \:  classified \:  positives}{total \:  positives}. $$ &#xD;
&#xD;
and False Positive Rate (FPR):&#xD;
&#xD;
$$ FPR:= \frac {incorrectly \:  classified \:  negatives}{total \:  negatives}. $$&#xD;
&#xD;
Assume that we can change the classifier results with a parameter $\theta$ and produce a plot like this one:&#xD;
&#xD;
[![AdultDatasetEnsembleClassifier][14]][14]&#xD;
&#xD;
For each parameter value $\theta _{i}$ the point ${TPR(\theta _{i}), FPR(\theta _{i})}$ is plotted; points corresponding to consecutive $\theta _{i}$&amp;#039;s &#xD;
are connected with a line. We call the obtained curve *the ROC curve* for the classifier in consideration.&#xD;
The ROC curve resides in *the ROC space* as defined by the functions FPR and TPR corresponding respectively to the $x$-axis and the $y$-axis.&#xD;
&#xD;
The ideal classifier would have its ROC curve comprised of a line connecting \{0,0\} to \{0,1\} and a line connecting \{0,1\} to \{1,1\}.&#xD;
&#xD;
Given a classifier the ROC point closest to \{0,1\}, generally, would be considered to be the best point.&#xD;
&#xD;
# Used packages &#xD;
&#xD;
These commands load the used *Mathematica* packages \[[4](https://github.com/antononcube/MathematicaForPrediction/blob/master/MathematicaForPredictionUtilities.m),[5](https://github.com/antononcube/MathematicaForPrediction/blob/master/ROCFunctions.m),6\]: &#xD;
&#xD;
    Import[&amp;#034;https://raw.githubusercontent.com/antononcube/MathematicaForPrediction/master/MathematicaForPredictionUtilities.m&amp;#034;]&#xD;
    Import[&amp;#034;https://raw.githubusercontent.com/antononcube/MathematicaForPrediction/master/ROCFunctions.m&amp;#034;]&#xD;
    Import[&amp;#034;https://raw.githubusercontent.com/antononcube/MathematicaForPrediction/master/ClassifierEnsembles.m&amp;#034;]&#xD;
&#xD;
# Data used&#xD;
&#xD;
## The Titanic dataset&#xD;
&#xD;
These commands load the Titanic data (that is shipped with *Mathematica*).&#xD;
&#xD;
    data = ExampleData[{&amp;#034;MachineLearning&amp;#034;, &amp;#034;Titanic&amp;#034;}, &amp;#034;TrainingData&amp;#034;];&#xD;
    columnNames = (Flatten@*List) @@ ExampleData[{&amp;#034;MachineLearning&amp;#034;, &amp;#034;Titanic&amp;#034;}, &amp;#034;VariableDescriptions&amp;#034;];&#xD;
    data = ((Flatten@*List) @@@ data)[[All, {1, 2, 3, -1}]];&#xD;
    trainingData = DeleteCases[data, {___, _Missing, ___}];&#xD;
    Dimensions[trainingData]&#xD;
&#xD;
    (* {732, 4} *)&#xD;
&#xD;
    RecordsSummary[trainingData, columnNames]&#xD;
&#xD;
[![Records1][1]][1]&#xD;
&#xD;
    data = ExampleData[{&amp;#034;MachineLearning&amp;#034;, &amp;#034;Titanic&amp;#034;}, &amp;#034;TestData&amp;#034;];&#xD;
    data = ((Flatten@*List) @@@ data)[[All, {1, 2, 3, -1}]];&#xD;
    testData = DeleteCases[data, {___, _Missing, ___}];&#xD;
    Dimensions[testData]&#xD;
&#xD;
    (* {314, 4} *)&#xD;
&#xD;
    RecordsSummary[testData, columnNames]&#xD;
&#xD;
[![Records2][2]][2]&#xD;
&#xD;
    nTrainingData = trainingData /. {&amp;#034;survived&amp;#034; -&amp;gt; 1, &amp;#034;died&amp;#034; -&amp;gt; 0, &amp;#034;1st&amp;#034; -&amp;gt; 0, &amp;#034;2nd&amp;#034; -&amp;gt; 1, &amp;#034;3rd&amp;#034; -&amp;gt; 2, &amp;#034;male&amp;#034; -&amp;gt; 0, &amp;#034;female&amp;#034; -&amp;gt; 1};&#xD;
&#xD;
# Classifier ensembles&#xD;
&#xD;
This command makes a classifier ensemble of two built-in classifiers &amp;#034;NearestNeighbors&amp;#034; and &amp;#034;NeuralNetwork&amp;#034;:&#xD;
&#xD;
    aCLs = EnsembleClassifier[{&amp;#034;NearestNeighbors&amp;#034;, &amp;#034;NeuralNetwork&amp;#034;}, trainingData[[All, 1 ;; -2]] -&amp;gt; trainingData[[All, -1]]]&#xD;
&#xD;
[![Classifier][3]][3]&#xD;
&#xD;
A classifier ensemble of the package \[[6](https://github.com/antononcube/MathematicaForPrediction/blob/master/ClassifierEnsembles.m)\] is simply an association mapping classifier IDs to classifier functions.&#xD;
&#xD;
The first argument given to `EnsembleClassifier` can be `Automatic`:&#xD;
&#xD;
    SeedRandom[8989]&#xD;
    aCLs = EnsembleClassifier[Automatic, trainingData[[All, 1 ;; -2]] -&amp;gt; trainingData[[All, -1]]];&#xD;
&#xD;
With `Automatic` the following built-in classifiers are used:&#xD;
&#xD;
    Keys[aCLs]&#xD;
&#xD;
    (* {&amp;#034;NearestNeighbors&amp;#034;, &amp;#034;NeuralNetwork&amp;#034;, &amp;#034;LogisticRegression&amp;#034;, &amp;#034;RandomForest&amp;#034;, &amp;#034;SupportVectorMachine&amp;#034;, &amp;#034;NaiveBayes&amp;#034;} *)&#xD;
&#xD;
## Classification with ensemble votes&#xD;
&#xD;
Classification with the classifier ensemble can be done using the function `EnsembleClassify`. If the third argument of `EnsembleClassify` is &amp;#034;Votes&amp;#034; the result is the class label that appears the most in the ensemble results.&#xD;
&#xD;
    EnsembleClassify[aCLs, testData[[20, 1 ;; -2]], &amp;#034;Votes&amp;#034;]&#xD;
&#xD;
    (* &amp;#034;died&amp;#034; *)&#xD;
&#xD;
The following commands clarify the voting done in the command above.&#xD;
&#xD;
    Map[#[testData[[20, 1 ;; -2]]] &amp;amp;, aCLs]&#xD;
    Tally[Values[%]]&#xD;
&#xD;
    (* &amp;lt;|&amp;#034;NearestNeighbors&amp;#034; -&amp;gt; &amp;#034;died&amp;#034;, &amp;#034;NeuralNetwork&amp;#034; -&amp;gt; &amp;#034;survived&amp;#034;, &amp;#034;LogisticRegression&amp;#034; -&amp;gt; &amp;#034;survived&amp;#034;, &amp;#034;RandomForest&amp;#034; -&amp;gt; &amp;#034;died&amp;#034;, &amp;#034;SupportVectorMachine&amp;#034; -&amp;gt; &amp;#034;died&amp;#034;, &amp;#034;NaiveBayes&amp;#034; -&amp;gt; &amp;#034;died&amp;#034;|&amp;gt; *)&#xD;
&#xD;
    (* {{&amp;#034;died&amp;#034;, 4}, {&amp;#034;survived&amp;#034;, 2}} *)&#xD;
&#xD;
## Classification with ensemble averaged probabilities&#xD;
&#xD;
If the third argument of `EnsembleClassify` is &amp;#034;`ProbabilitiesMean`&amp;#034; the result is the class label that has the highest mean probability in the ensemble results.&#xD;
&#xD;
    EnsembleClassify[aCLs, testData[[20, 1 ;; -2]], &amp;#034;ProbabilitiesMean&amp;#034;]&#xD;
&#xD;
    (* &amp;#034;died&amp;#034; *)&#xD;
&#xD;
The following commands clarify the probability averaging utilized in the command above.&#xD;
&#xD;
    Map[#[testData[[20, 1 ;; -2]], &amp;#034;Probabilities&amp;#034;] &amp;amp;, aCLs]&#xD;
    Mean[Values[%]]&#xD;
&#xD;
    (* &amp;lt;|&amp;#034;NearestNeighbors&amp;#034; -&amp;gt; &amp;lt;|&amp;#034;died&amp;#034; -&amp;gt; 0.598464, &amp;#034;survived&amp;#034; -&amp;gt; 0.401536|&amp;gt;, &amp;#034;NeuralNetwork&amp;#034; -&amp;gt; &amp;lt;|&amp;#034;died&amp;#034; -&amp;gt; 0.469274, &amp;#034;survived&amp;#034; -&amp;gt; 0.530726|&amp;gt;, &amp;#034;LogisticRegression&amp;#034; -&amp;gt; &amp;lt;|&amp;#034;died&amp;#034; -&amp;gt; 0.445915, &amp;#034;survived&amp;#034; -&amp;gt; 0.554085|&amp;gt;, &#xD;
    &amp;#034;RandomForest&amp;#034; -&amp;gt; &amp;lt;|&amp;#034;died&amp;#034; -&amp;gt; 0.652414, &amp;#034;survived&amp;#034; -&amp;gt; 0.347586|&amp;gt;, &amp;#034;SupportVectorMachine&amp;#034; -&amp;gt; &amp;lt;|&amp;#034;died&amp;#034; -&amp;gt; 0.929831, &amp;#034;survived&amp;#034; -&amp;gt; 0.0701691|&amp;gt;, &amp;#034;NaiveBayes&amp;#034; -&amp;gt; &amp;lt;|&amp;#034;died&amp;#034; -&amp;gt; 0.622061, &amp;#034;survived&amp;#034; -&amp;gt; 0.377939|&amp;gt;|&amp;gt; *)&#xD;
&#xD;
    (* &amp;lt;|&amp;#034;died&amp;#034; -&amp;gt; 0.61966, &amp;#034;survived&amp;#034; -&amp;gt; 0.38034|&amp;gt; *)&#xD;
&#xD;
## ROC for ensemble votes&#xD;
&#xD;
The third argument of `EnsembleClassifyByThreshold` takes a rule of the form *label-&amp;gt;threshold*; the fourth argument is either &amp;#034;Votes&amp;#034; or &amp;#034;ProbabilitiesMean&amp;#034;.&#xD;
&#xD;
The following code computes the ROC curve for a range of votes.&#xD;
&#xD;
    rocRange = Range[0, Length[aCLs] - 1, 1];&#xD;
    aROCs = Table[(&#xD;
        cres = EnsembleClassifyByThreshold[aCLs, testData[[All, 1 ;; -2]], &amp;#034;survived&amp;#034; -&amp;gt; i, &amp;#034;Votes&amp;#034;]; ToROCAssociation[{&amp;#034;survived&amp;#034;, &amp;#034;died&amp;#034;}, testData[[All, -1]], cres]), {i, rocRange}];&#xD;
    ROCPlot[rocRange, aROCs, &amp;#034;PlotJoined&amp;#034; -&amp;gt; Automatic, GridLines -&amp;gt; Automatic]&#xD;
&#xD;
[![Ensemble1][4]][4]&#xD;
&#xD;
## ROC for ensemble probabilities mean&#xD;
&#xD;
If we want to compute ROC for a range of probability thresholds we use `EnsembleClassifyByThreshold` with the fourth argument being &amp;#034;ProbabilitiesMean&amp;#034;.&#xD;
&#xD;
    EnsembleClassifyByThreshold[aCLs, testData[[1 ;; 6, 1 ;; -2]], &amp;#034;survived&amp;#034; -&amp;gt; 0.2, &amp;#034;ProbabilitiesMean&amp;#034;]&#xD;
&#xD;
    (* {&amp;#034;survived&amp;#034;, &amp;#034;survived&amp;#034;, &amp;#034;survived&amp;#034;, &amp;#034;survived&amp;#034;, &amp;#034;survived&amp;#034;, &amp;#034;survived&amp;#034;} *)&#xD;
&#xD;
    EnsembleClassifyByThreshold[aCLs, testData[[1 ;; 6, 1 ;; -2]], &amp;#034;survived&amp;#034; -&amp;gt; 0.6, &amp;#034;ProbabilitiesMean&amp;#034;]&#xD;
&#xD;
    (* {&amp;#034;survived&amp;#034;, &amp;#034;died&amp;#034;, &amp;#034;survived&amp;#034;, &amp;#034;died&amp;#034;, &amp;#034;died&amp;#034;, &amp;#034;survived&amp;#034;} *)&#xD;
&#xD;
The implementation of `EnsembleClassifyByThreshold` with &amp;#034;ProbabilitiesMean&amp;#034; relies on the `ClassifierFunction` signature:&#xD;
&#xD;
`ClassifierFunction[__][record_, &amp;#034;Probabilities&amp;#034;]`&#xD;
&#xD;
Here is the corresponding ROC plot:&#xD;
&#xD;
    rocRange = Range[0, 1, 0.025];&#xD;
    aROCs = Table[(&#xD;
        cres = EnsembleClassifyByThreshold[aCLs, testData[[All, 1 ;; -2]], &amp;#034;survived&amp;#034; -&amp;gt; i, &amp;#034;ProbabilitiesMean&amp;#034;]; ToROCAssociation[{&amp;#034;survived&amp;#034;, &amp;#034;died&amp;#034;}, testData[[All, -1]], cres]), {i, rocRange}];&#xD;
    rocEnGr = ROCPlot[rocRange, aROCs, &amp;#034;PlotJoined&amp;#034; -&amp;gt; Automatic, PlotLabel -&amp;gt; &amp;#034;Classifier ensemble&amp;#034;, GridLines -&amp;gt; Automatic]&#xD;
&#xD;
[![Ensemble2][5]][5]&#xD;
&#xD;
## Comparison of the ensemble classifier with the standard classifiers&#xD;
&#xD;
This plot compares the ROC curve of the ensemble classifier with the ROC curves of the classifiers that comprise the ensemble.&#xD;
&#xD;
    rocGRs = Table[&#xD;
       aROCs1 = Table[(&#xD;
          cres = ClassifyByThreshold[aCLs[[i]], testData[[All, 1 ;; -2]], &amp;#034;survived&amp;#034; -&amp;gt; th];&#xD;
          ToROCAssociation[{&amp;#034;survived&amp;#034;, &amp;#034;died&amp;#034;}, testData[[All, -1]], cres]), {th, rocRange}]; &#xD;
       ROCPlot[rocRange, aROCs1, PlotLabel -&amp;gt; Keys[aCLs][[i]], PlotRange -&amp;gt; {{0, 1.05}, {0.6, 1.01}}, &amp;#034;PlotJoined&amp;#034; -&amp;gt; Automatic, GridLines -&amp;gt; Automatic],&#xD;
       {i, 1, Length[aCLs]}];&#xD;
&#xD;
    GraphicsGrid[ArrayReshape[Append[Prepend[rocGRs, rocEnGr], rocEnGr], {2, 4}, &amp;#034;&amp;#034;], Dividers -&amp;gt; All, FrameStyle -&amp;gt; GrayLevel[0.8], ImageSize -&amp;gt; 1200]&#xD;
&#xD;
[![Ensemble3][6]][6]&#xD;
&#xD;
Let us plot all ROC curves from the graphics grid above into one plot. For that the single classifier ROC curves are made gray, and their threshold callouts removed. We can see that the classifier ensemble brings very good results for $\theta = 0.175$ and none of the single classifiers has a better point.&#xD;
&#xD;
    Show[Append[rocGRs /. {RGBColor[___] -&amp;gt; GrayLevel[0.8]} /. {Text[p_, ___] :&amp;gt; Null} /. ((PlotLabel -&amp;gt; _) :&amp;gt; (PlotLabel -&amp;gt; Null)), rocEnGr]]&#xD;
&#xD;
[![EnsembleFull][7]][7]&#xD;
&#xD;
# Classifier ensembles by bootstrapping&#xD;
&#xD;
There are several ways to produce ensemble classifiers using [bootstrapping](https://en.wikipedia.org/wiki/Bootstrapping_(statistics)) or [jackknife](https://en.wikipedia.org/wiki/Jackknife_resampling) resampling procedures.&#xD;
&#xD;
First, we are going to make a bootstrapping classifier ensemble using one of the `Classify` methods. Then we are going to make a more complicated bootstrapping classifier with six methods of `Classify`.&#xD;
&#xD;
## Bootstrapping ensemble with a single classification method&#xD;
&#xD;
First we select a classification method and make a classifier with it.&#xD;
&#xD;
    clMethod = &amp;#034;NearestNeighbors&amp;#034;;&#xD;
    sCL = Classify[trainingData[[All, 1 ;; -2]] -&amp;gt; trainingData[[All, -1]], Method -&amp;gt; clMethod];&#xD;
&#xD;
The following code makes a classifier ensemble of 12 classifier functions using resampled, slightly smaller (10%) versions of the original training data (with `RandomChoice`).&#xD;
&#xD;
    SeedRandom[1262];&#xD;
    aBootStrapCLs = Association@Table[(&#xD;
         inds = RandomChoice[Range[Length[trainingData]], Floor[0.9*Length[trainingData]]];&#xD;
         ToString[i] -&amp;gt; Classify[trainingData[[inds, 1 ;; -2]] -&amp;gt; trainingData[[inds, -1]], Method -&amp;gt; clMethod]), {i, 12}];&#xD;
&#xD;
Let us compare the ROC curves of the single classifier with the bootstrapping derived ensemble.&#xD;
&#xD;
    rocRange = Range[0.1, 0.9, 0.025];&#xD;
    AbsoluteTiming[&#xD;
     aSingleROCs = Table[(&#xD;
        cres = ClassifyByThreshold[sCL, testData[[All, 1 ;; -2]], &amp;#034;survived&amp;#034; -&amp;gt; i]; ToROCAssociation[{&amp;#034;survived&amp;#034;, &amp;#034;died&amp;#034;}, testData[[All, -1]], cres]), {i, rocRange}];&#xD;
     aBootStrapROCs = Table[(&#xD;
        cres = EnsembleClassifyByThreshold[aBootStrapCLs, testData[[All, 1 ;; -2]], &amp;#034;survived&amp;#034; -&amp;gt; i]; ToROCAssociation[{&amp;#034;survived&amp;#034;, &amp;#034;died&amp;#034;}, testData[[All, -1]], cres]), {i, rocRange}];&#xD;
    ]&#xD;
&#xD;
    (* {6.81521, Null} *)&#xD;
&#xD;
    Legended[&#xD;
     Show[{&#xD;
       ROCPlot[rocRange, aSingleROCs, &amp;#034;ROCColor&amp;#034; -&amp;gt; Blue, &amp;#034;PlotJoined&amp;#034; -&amp;gt; Automatic, GridLines -&amp;gt; Automatic],&#xD;
       ROCPlot[rocRange, aBootStrapROCs, &amp;#034;ROCColor&amp;#034; -&amp;gt; Red, &amp;#034;PlotJoined&amp;#034; -&amp;gt; Automatic]}],&#xD;
     SwatchLegend @@ Transpose@{{Blue, Row[{&amp;#034;Single &amp;#034;, clMethod, &amp;#034; classifier&amp;#034;}]}, {Red, Row[{&amp;#034;Boostrapping ensemble of\n&amp;#034;, Length[aBootStrapCLs], &amp;#034; &amp;#034;, clMethod, &amp;#034; classifiers&amp;#034;}]}}]&#xD;
&#xD;
[![Bootsrap1][8]][8]&#xD;
&#xD;
We can see that we get much better results with the bootstrapped ensemble.&#xD;
&#xD;
## Bootstrapping ensemble with multiple classifier methods&#xD;
&#xD;
This code creates a classifier ensemble using the classifier methods corresponding to `Automatic` given as a first argument to `EnsembleClassifier`.&#xD;
&#xD;
    SeedRandom[2324]&#xD;
    AbsoluteTiming[&#xD;
     aBootStrapLargeCLs = Association@Table[(&#xD;
          inds = RandomChoice[Range[Length[trainingData]], Floor[0.9*Length[trainingData]]];&#xD;
          ecls = EnsembleClassifier[Automatic, trainingData[[inds, 1 ;; -2]] -&amp;gt; trainingData[[inds, -1]]];&#xD;
          AssociationThread[Map[# &amp;lt;&amp;gt; &amp;#034;-&amp;#034; &amp;lt;&amp;gt; ToString[i] &amp;amp;, Keys[ecls]] -&amp;gt; Values[ecls]]&#xD;
         ), {i, 12}];&#xD;
    ]&#xD;
&#xD;
    (* {27.7975, Null} *)&#xD;
&#xD;
This code computes the ROC statistics with the obtained bootstrapping classifier ensemble:&#xD;
&#xD;
    AbsoluteTiming[&#xD;
     aBootStrapLargeROCs = Table[(&#xD;
         cres = EnsembleClassifyByThreshold[aBootStrapLargeCLs, testData[[All, 1 ;; -2]], &amp;#034;survived&amp;#034; -&amp;gt; i]; ToROCAssociation[{&amp;#034;survived&amp;#034;, &amp;#034;died&amp;#034;}, testData[[All, -1]], cres]), {i, rocRange}];&#xD;
    ]&#xD;
&#xD;
    (* {45.1995, Null} *)&#xD;
&#xD;
Let us plot the ROC curve of the bootstrapping classifier ensemble (in blue) and the single classifier ROC curves (in gray):&#xD;
&#xD;
    aBootStrapLargeGr = ROCPlot[rocRange, aBootStrapLargeROCs, &amp;#034;PlotJoined&amp;#034; -&amp;gt; Automatic];&#xD;
    Show[Append[rocGRs /. {RGBColor[___] -&amp;gt; GrayLevel[0.8]} /. {Text[p_, ___] :&amp;gt; Null} /. ((PlotLabel -&amp;gt; _) :&amp;gt; (PlotLabel -&amp;gt; Null)), aBootStrapLargeGr]]&#xD;
&#xD;
[![BootsrapFull][9]][9]&#xD;
&#xD;
Again we can see that the bootstrapping ensemble produced better ROC points than the single classifiers.&#xD;
&#xD;
# Damaging data&#xD;
&#xD;
This section tries to explain why the bootstrapping with resampling to smaller sizes produces good results.&#xD;
&#xD;
In short, the training data has outliers; if we remove a small fraction of the training data we might get better results.&#xD;
&#xD;
The procedure described in this section can be used in conjunction with the procedures described in the guide for importance of variables investigation \[[7](https://github.com/antononcube/MathematicaForPrediction/blob/master/MarkdownDocuments/Importance-of-variables-investigation-guide.md)\].&#xD;
&#xD;
## Ordering function&#xD;
&#xD;
Let us replace the categorical values with numerical in the training data. There are several ways to do it, here is a fairly straightforward one:&#xD;
&#xD;
    nTrainingData = trainingData /. {&amp;#034;survived&amp;#034; -&amp;gt; 1, &amp;#034;died&amp;#034; -&amp;gt; 0, &amp;#034;1st&amp;#034; -&amp;gt; 0, &amp;#034;2nd&amp;#034; -&amp;gt; 1, &amp;#034;3rd&amp;#034; -&amp;gt; 2, &amp;#034;male&amp;#034; -&amp;gt; 0, &amp;#034;female&amp;#034; -&amp;gt; 1};&#xD;
&#xD;
## Decreasing proportions of females&#xD;
&#xD;
First, let us find all indices corresponding to records about females.&#xD;
&#xD;
    femaleInds = Flatten@Position[trainingData[[All, 3]], &amp;#034;female&amp;#034;];&#xD;
&#xD;
The following code standardizes the training data corresponding to females, finds the mean record, computes distances from the mean record, and finally orders the female records indices according to their distances from the mean record.&#xD;
&#xD;
    t = Transpose@Map[Rescale@*Standardize, N@Transpose@nTrainingData[[femaleInds, 1 ;; 2]]];&#xD;
    m = Mean[t];&#xD;
    ds = Map[EuclideanDistance[#, m] &amp;amp;, t];&#xD;
    femaleInds = femaleInds[[Reverse@Ordering[ds]]];&#xD;
&#xD;
The following plot shows the distances calculated above.&#xD;
&#xD;
    ListPlot[Sort@ds, PlotRange -&amp;gt; All, PlotTheme -&amp;gt; &amp;#034;Detailed&amp;#034;]&#xD;
&#xD;
[![FemaleRecord][10]][10]&#xD;
&#xD;
The following code removes from the training data the records corresponding to females according to the order computed above. The female records farthest from the mean female record are removed first.&#xD;
&#xD;
    AbsoluteTiming[&#xD;
     femaleFrRes = Association@&#xD;
        Table[cl -&amp;gt;&#xD;
          Table[(&#xD;
            inds = Complement[Range[Length[trainingData]], Take[femaleInds, Ceiling[fr*Length[femaleInds]]]];&#xD;
            cf = Classify[trainingData[[inds, 1 ;; -2]] -&amp;gt; trainingData[[inds, -1]], Method -&amp;gt; cl]; cfPredictedLabels = cf /@ testData[[All, 1 ;; -2]];&#xD;
            {fr, ToROCAssociation[{&amp;#034;survived&amp;#034;, &amp;#034;died&amp;#034;}, testData[[All, -1]], cfPredictedLabels]}),&#xD;
           {fr, 0, 0.8, 0.05}],&#xD;
         {cl, {&amp;#034;NearestNeighbors&amp;#034;, &amp;#034;NeuralNetwork&amp;#034;, &amp;#034;LogisticRegression&amp;#034;, &amp;#034;RandomForest&amp;#034;, &amp;#034;SupportVectorMachine&amp;#034;, &amp;#034;NaiveBayes&amp;#034;}}];&#xD;
    ]&#xD;
&#xD;
    (* {203.001, Null} *)&#xD;
&#xD;
The following graphics grid shows how the classification results are affected by the removing fractions of the female records from the training data. The results for none or small fractions of records removed are more blue. &#xD;
&#xD;
    GraphicsGrid[ArrayReshape[&#xD;
      Table[&#xD;
       femaleAROCs = femaleFrRes[cl][[All, 2]];&#xD;
       frRange = femaleFrRes[cl][[All, 1]]; ROCPlot[frRange, femaleAROCs, PlotRange -&amp;gt; {{0.0, 0.25}, {0.2, 0.8}}, PlotLabel -&amp;gt; cl, &amp;#034;ROCPointColorFunction&amp;#034; -&amp;gt; (Blend[{Blue, Red}, #3/Length[frRange]] &amp;amp;), ImageSize -&amp;gt; 300],&#xD;
       {cl, Keys[femaleFrRes]}],&#xD;
      {2, 3}], Dividers -&amp;gt; All]&#xD;
&#xD;
[![FemaleGrid][11]][11]&#xD;
&#xD;
We can see that removing the female records outliers has a dramatic effect on the results by the classifiers &amp;#034;NearestNeighbors&amp;#034; and &amp;#034;NeuralNetwork&amp;#034;. Not so much on &amp;#034;LogisticRegression&amp;#034; and &amp;#034;NaiveBayes&amp;#034;.&#xD;
&#xD;
## Decreasing proportions of males&#xD;
&#xD;
The code in this sub-section repeats the experiment described in the previous one for males (instead of females).&#xD;
&#xD;
    maleInds = Flatten@Position[trainingData[[All, 3]], &amp;#034;male&amp;#034;];&#xD;
&#xD;
    t = Transpose@Map[Rescale@*Standardize, N@Transpose@nTrainingData[[maleInds, 1 ;; 2]]];&#xD;
    m = Mean[t];&#xD;
    ds = Map[EuclideanDistance[#, m] &amp;amp;, t];&#xD;
    maleInds = maleInds[[Reverse@Ordering[ds]]];&#xD;
&#xD;
    ListPlot[Sort@ds, PlotRange -&amp;gt; All, PlotTheme -&amp;gt; &amp;#034;Detailed&amp;#034;]&#xD;
&#xD;
[![MaleRecord][12]][12]&#xD;
&#xD;
    AbsoluteTiming[&#xD;
     maleFrRes = Association@&#xD;
        Table[cl -&amp;gt;&#xD;
          Table[(&#xD;
            inds = Complement[Range[Length[trainingData]], Take[maleInds, Ceiling[fr*Length[maleInds]]]];&#xD;
            cf = Classify[trainingData[[inds, 1 ;; -2]] -&amp;gt; trainingData[[inds, -1]], Method -&amp;gt; cl]; cfPredictedLabels = cf /@ testData[[All, 1 ;; -2]];&#xD;
            {fr, ToROCAssociation[{&amp;#034;survived&amp;#034;, &amp;#034;died&amp;#034;}, testData[[All, -1]], cfPredictedLabels]}),&#xD;
           {fr, 0, 0.8, 0.05}],&#xD;
         {cl, {&amp;#034;NearestNeighbors&amp;#034;, &amp;#034;NeuralNetwork&amp;#034;, &amp;#034;LogisticRegression&amp;#034;, &amp;#034;RandomForest&amp;#034;, &amp;#034;SupportVectorMachine&amp;#034;, &amp;#034;NaiveBayes&amp;#034;}}];&#xD;
    ]&#xD;
&#xD;
    (* {179.219, Null} *)&#xD;
&#xD;
    GraphicsGrid[ArrayReshape[&#xD;
      Table[&#xD;
       maleAROCs = maleFrRes[cl][[All, 2]];&#xD;
       frRange = maleFrRes[cl][[All, 1]]; ROCPlot[frRange, maleAROCs, PlotRange -&amp;gt; {{0.0, 0.35}, {0.55, 0.82}}, PlotLabel -&amp;gt; cl, &amp;#034;ROCPointColorFunction&amp;#034; -&amp;gt; (Blend[{Blue, Red}, #3/Length[frRange]] &amp;amp;), ImageSize -&amp;gt; 300],&#xD;
       {cl, Keys[maleFrRes]}],&#xD;
      {2, 3}], Dividers -&amp;gt; All]&#xD;
&#xD;
[![MaleGrid][13]][13]&#xD;
&#xD;
&#xD;
# Classifier interpolation&#xD;
&#xD;
Assume that we want a classifier that for a given representative set of $n$ items (records) assigns the positive label to exactly $n_p$ of them. (Or very close to that number.)&#xD;
&#xD;
If we have two classifiers, one returning more positive items than $n_p$, the other less than $n_p$, then we can use geometric computations in &#xD;
the ROC space in order to obtain parameters for a classifier interpolation that will bring positive items close to $n_p$; see \[3\]. &#xD;
Below is given *Mathematica* code with explanations of how that classifier interpolation is done.&#xD;
&#xD;
Assume that by prior observations we know that for a given dataset of $n$ items the positive class consists of $\approx 0.09 n$ items. &#xD;
Assume that for a given unknown dataset of $n$ items we want $0.2 n$ of the items to be classified as positive. We can write the equation:&#xD;
&#xD;
$$ {FPR} * ((1-0.09) * n) + {TPR} * (0.09 * n) = 0.2 * n ,$$&#xD;
&#xD;
which can be simplified to&#xD;
&#xD;
$$ {FPR} * (1-0.09) + {TPR} * 0.09 = 0.2 .$$&#xD;
&#xD;
## The two classifiers&#xD;
&#xD;
Consider the following two classifiers.&#xD;
&#xD;
    cf1 = Classify[trainingData[[All, 1 ;; -2]] -&amp;gt; trainingData[[All, -1]], Method -&amp;gt; &amp;#034;RandomForest&amp;#034;];&#xD;
    cfROC1 = ToROCAssociation[{&amp;#034;survived&amp;#034;, &amp;#034;died&amp;#034;}, testData[[All, -1]], cf1[testData[[All, 1 ;; -2]]]]&#xD;
    (* &amp;lt;|&amp;#034;TruePositive&amp;#034; -&amp;gt; 82, &amp;#034;FalsePositive&amp;#034; -&amp;gt; 22, &amp;#034;TrueNegative&amp;#034; -&amp;gt; 170, &amp;#034;FalseNegative&amp;#034; -&amp;gt; 40|&amp;gt; *)&#xD;
&#xD;
    cf2 = Classify[trainingData[[All, 1 ;; -2]] -&amp;gt; trainingData[[All, -1]], Method -&amp;gt; &amp;#034;LogisticRegression&amp;#034;];&#xD;
    cfROC2 = ToROCAssociation[{&amp;#034;survived&amp;#034;, &amp;#034;died&amp;#034;}, testData[[All, -1]], cf2[testData[[All, 1 ;; -2]]]]&#xD;
    (* &amp;lt;|&amp;#034;TruePositive&amp;#034; -&amp;gt; 89, &amp;#034;FalsePositive&amp;#034; -&amp;gt; 37, &amp;#034;TrueNegative&amp;#034; -&amp;gt; 155, &amp;#034;FalseNegative&amp;#034; -&amp;gt; 33|&amp;gt; *)&#xD;
&#xD;
## Geometric computations in the ROC space&#xD;
&#xD;
Here are the ROC space points corresponding to the two classifiers, cf1 and cf2: &#xD;
&#xD;
    p1 = Through[ROCFunctions[{&amp;#034;FPR&amp;#034;, &amp;#034;TPR&amp;#034;}][cfROC1]];&#xD;
    p2 = Through[ROCFunctions[{&amp;#034;FPR&amp;#034;, &amp;#034;TPR&amp;#034;}][cfROC2]];&#xD;
&#xD;
Here is the breakdown of frequencies of the class labels:&#xD;
&#xD;
    Tally[trainingData[[All, -1]]]&#xD;
    %[[All, 2]]/Length[trainingData] // N&#xD;
&#xD;
    (* {{&amp;#034;survived&amp;#034;, 305}, {&amp;#034;died&amp;#034;, 427}}&#xD;
       {0.416667, 0.583333} *)&#xD;
&#xD;
We want our classifier to predict that $38$% of the people survive. Here we find two points of the corresponding constraint line (on which the ROC points of the desired classifiers should reside):&#xD;
&#xD;
    sol1 = Solve[{{x, y} \[Element] ImplicitRegion[{x (1 - 0.42) + y 0.42 == 0.38}, {x, y}], x == 0.1}, {x, y}][[1]]&#xD;
    sol2 = Solve[{{x, y} \[Element] ImplicitRegion[{x (1 - 0.42) + y 0.42 == 0.38}, {x, y}], x == 0.25}, {x, y}][[1]]&#xD;
&#xD;
    (* {x -&amp;gt; 0.1, y -&amp;gt; 0.766667}&#xD;
       {x -&amp;gt; 0.25, y -&amp;gt; 0.559524} *)&#xD;
&#xD;
Here using the points q1 and q2 of the constraint line we find the intersection point with the line connecting the ROC points of the classifiers:&#xD;
&#xD;
    {q1, q2} = {{x, y} /. sol1, {x, y} /. sol2};&#xD;
    sol = Solve[ {{x, y} \[Element] InfiniteLine[{q1, q2}] \[And] {x, y} \[Element] InfiniteLine[{p1, p2}]}, {x, y}];&#xD;
    q = {x, y} /. sol[[1]]&#xD;
&#xD;
    (* {0.149753, 0.69796} *)&#xD;
&#xD;
Let us plot all geometric objects:&#xD;
&#xD;
    Graphics[{PointSize[0.015], Blue, Tooltip[Point[p1], &amp;#034;cf1&amp;#034;], Black, &#xD;
      Text[&amp;#034;cf1&amp;#034;, p1, {-1.5, 1}], Red, Tooltip[Point[p2], &amp;#034;cf2&amp;#034;], Black, &#xD;
      Text[&amp;#034;cf2&amp;#034;, p2, {1.5, -1}], Black, Point[q], Dashed, &#xD;
      InfiniteLine[{q1, q2}], Thin, InfiniteLine[{p1, p2}]}, &#xD;
     PlotRange -&amp;gt; {{0., 0.3}, {0.6, 0.8}}, &#xD;
     GridLines -&amp;gt; Automatic, Frame -&amp;gt; True]&#xD;
&#xD;
![Classifier-Interpolation-geometric-objects][15]&#xD;
&#xD;
## Classifier interpolation&#xD;
&#xD;
Next we find the ratio of the distance from the intersection point q to the cf1 ROC point and the distance between the ROC points of cf1 and cf2.&#xD;
&#xD;
    k = Norm[p1 - q]/Norm[p1 - p2]&#xD;
    (* 0.450169 *)&#xD;
&#xD;
The classifier interpolation is made by a weighted random selection based on that ratio (using `RandomChoice`):&#xD;
&#xD;
    SeedRandom[8989]&#xD;
    cres = MapThread[If, {RandomChoice[{1 - k, k} -&amp;gt; {True, False}, Length[testData]], cf1@testData[[All, 1 ;; -2]], cf2@testData[[All, 1 ;; -2]]}];&#xD;
    cfROC3 = ToROCAssociation[{&amp;#034;survived&amp;#034;, &amp;#034;died&amp;#034;}, testData[[All, -1]], cres];&#xD;
    p3 = Through[ROCFunctions[{&amp;#034;FPR&amp;#034;, &amp;#034;TPR&amp;#034;}][cfROC3]];&#xD;
    Graphics[{PointSize[0.015], Blue, Point[p1], Red, Point[p2], Black, Dashed, InfiniteLine[{q1, q2}], Green, Point[p3]}, &#xD;
     PlotRange -&amp;gt; {{0., 0.3}, {0.6, 0.8}}, &#xD;
     GridLines -&amp;gt; Automatic, Frame -&amp;gt; True]&#xD;
&#xD;
![Classifier-Interpolation-single-results][16]&#xD;
&#xD;
We can run the process multiple times in order to convince ourselves that the interpolated classifier ROC point is very close to the constraint line most of the time.&#xD;
&#xD;
    p3s =&#xD;
      Table[(&#xD;
        cres = &#xD;
         MapThread[If, {RandomChoice[{1 - k, k} -&amp;gt; {True, False}, Length[testData]], cf1@testData[[All, 1 ;; -2]], cf2@testData[[All, 1 ;; -2]]}]; &#xD;
        cfROC3 = ToROCAssociation[{&amp;#034;survived&amp;#034;, &amp;#034;died&amp;#034;}, testData[[All, -1]], cres];&#xD;
        Through[ROCFunctions[{&amp;#034;FPR&amp;#034;, &amp;#034;TPR&amp;#034;}][cfROC3]]), {1000}];&#xD;
&#xD;
    Show[{SmoothDensityHistogram[p3s, ColorFunction -&amp;gt; (Blend[{White, Green}, #] &amp;amp;), Mesh -&amp;gt; 3], &#xD;
      Graphics[{PointSize[0.015], Blue, Tooltip[Point[p1], &amp;#034;cf1&amp;#034;], Black, Text[&amp;#034;cf1&amp;#034;, p1, {-1.5, 1}], &#xD;
         Red, Tooltip[Point[p2], &amp;#034;cf2&amp;#034;], Black, Text[&amp;#034;cf2&amp;#034;, p2, {1.5, -1}], &#xD;
         Black, Dashed, InfiniteLine[{q1, q2}]}, GridLines -&amp;gt; Automatic]}, &#xD;
     PlotRange -&amp;gt; {{0., 0.3}, {0.6, 0.8}}, &#xD;
     GridLines -&amp;gt; Automatic, Axes -&amp;gt; True, &#xD;
     AspectRatio -&amp;gt; Automatic]&#xD;
&#xD;
![Classifier-Interpolation-1000-results][17]&#xD;
&#xD;
&#xD;
&#xD;
# References&#xD;
&#xD;
\[1\] Leo Breiman, Statistical Modeling: The Two Cultures, (2001), Statistical Science, Vol. 16, No. 3, 199\[Dash]231.&#xD;
&#xD;
\[2\] Wikipedia entry, Receiver operating characteristic. URL: http://en.wikipedia.org/wiki/Receiver_operating_characteristic .&#xD;
&#xD;
\[3\] Tom Fawcett, An introduction to ROC analysis, (2006), Pattern Recognition Letters, 27, 861\[Dash]874. ([Link to PDF](Link to PDF).)&#xD;
&#xD;
\[4\] Anton Antonov, [MathematicaForPrediction utilities](https://github.com/antononcube/MathematicaForPrediction/blob/master/MathematicaForPredictionUtilities.m), (2014), source code [MathematicaForPrediction at GitHub](https://github.com/antononcube/MathematicaForPrediction), package [MathematicaForPredictionUtilities.m](https://raw.githubusercontent.com/antononcube/MathematicaForPrediction/master/MathematicaForPredictionUtilities.m).&#xD;
&#xD;
\[5\] Anton Antonov, [Receiver operating characteristic functions Mathematica package](https://github.com/antononcube/MathematicaForPrediction/blob/master/ROCFunctions.m), (2016), source code [MathematicaForPrediction at GitHub](https://github.com/antononcube/MathematicaForPrediction), package [ROCFunctions.m](https://raw.githubusercontent.com/antononcube/MathematicaForPrediction/master/ROCFunctions.m).&#xD;
&#xD;
\[6\] Anton Antonov, [Classifier ensembles functions Mathematica package](https://github.com/antononcube/MathematicaForPrediction/blob/master/ClassifierEnsembles.m), (2016),  source code [MathematicaForPrediction at GitHub](https://github.com/antononcube/MathematicaForPrediction), package [ClassifierEnsembles.m](https://raw.githubusercontent.com/antononcube/MathematicaForPrediction/master/ClassifierEnsembles.m).&#xD;
&#xD;
\[7\] Anton Antonov, &amp;#034;[Importance of variables investigation guide](https://github.com/antononcube/MathematicaForPrediction/blob/master/Documentation/Importance-of-variables-investigation-guide.pdf)&amp;#034;, (2016),  [MathematicaForPrediction at GitHub](https://github.com/antononcube/MathematicaForPrediction), https://github.com/antononcube/MathematicaForPrediction, folder [Documentation](https://github.com/antononcube/MathematicaForPrediction/tree/master/Documentation).&#xD;
&#xD;
[1]:http://i.imgur.com/WDNgwNf.png&#xD;
[2]:http://i.imgur.com/G6DD5DD.png&#xD;
[3]:http://i.imgur.com/annEj2s.png&#xD;
[4]:http://i.imgur.com/c4pPgnq.png&#xD;
[5]:http://i.imgur.com/JZ6JpfY.png&#xD;
[6]:http://i.imgur.com/C9ZBbEv.png&#xD;
[7]:http://i.imgur.com/jmdC521.png&#xD;
[8]:http://i.imgur.com/faspQpc.png&#xD;
[9]:http://i.imgur.com/IZLiMer.png&#xD;
[10]:http://i.imgur.com/XMPsiNO.png&#xD;
[11]:http://i.imgur.com/BkDTZXD.png&#xD;
[12]:http://i.imgur.com/bZY8pL3.png&#xD;
[13]:http://i.imgur.com/Te2kDxh.png&#xD;
[14]:http://i.imgur.com/OcCFBBH.png&#xD;
[15]:http://i.imgur.com/brboKWy.png&#xD;
[16]:http://i.imgur.com/uP4aexV.png&#xD;
[17]:http://i.imgur.com/8cZHK2o.png</description>
    <dc:creator>Anton Antonov</dc:creator>
    <dc:date>2016-10-15T17:12:50Z</dc:date>
  </item>
  <item rdf:about="https://community.wolfram.com/groups/-/m/t/3212029">
    <title>[WSS24] Evolving finite automata that play evolutionary games</title>
    <link>https://community.wolfram.com/groups/-/m/t/3212029</link>
    <description>&amp;amp;[Wolfram Notebook][1]&#xD;
&#xD;
&#xD;
&amp;amp;[Wolfram Notebook][2]&#xD;
&#xD;
&#xD;
  [1]: https://www.wolframcloud.com/obj/28de152c-6a65-4afb-817f-c0465d833c8f&#xD;
  [2]: https://www.wolframcloud.com/obj/741d9d58-337d-448f-bb17-d44f22b2c847</description>
    <dc:creator>Paul Harrald</dc:creator>
    <dc:date>2024-07-10T18:24:57Z</dc:date>
  </item>
  <item rdf:about="https://community.wolfram.com/groups/-/m/t/3209918">
    <title>[WSS24] Hidden in plain sight: The Vinblastine reimbursement gap in Medicaid data (1991-2022)</title>
    <link>https://community.wolfram.com/groups/-/m/t/3209918</link>
    <description>Apologies in advance for any technical difficulties that the primary author could not overcome with her elementary grasp of the Wolfram language at this time.&#xD;
&#xD;
&amp;amp;[Wolfram Notebook][1]&#xD;
&#xD;
&#xD;
  [1]: https://www.wolframcloud.com/obj/03cb3ae2-4dc5-4c08-8b60-48489d4d8d4b</description>
    <dc:creator>Phuong (Sophie) Le</dc:creator>
    <dc:date>2024-07-10T01:04:50Z</dc:date>
  </item>
  <item rdf:about="https://community.wolfram.com/groups/-/m/t/3204807">
    <title>Find the function for a decision variable</title>
    <link>https://community.wolfram.com/groups/-/m/t/3204807</link>
    <description>Hi, I&amp;#039;m currently trying to find the equation for a decision variable from a total profit equation using Mathematica. From what I&amp;#039;ve read, I can use the solve function to do this. Here&amp;#039;s a example i found:&#xD;
&#xD;
    (*Define the total cost function*)&#xD;
    TC[Q_] := (D S/Q) + (H Q/2)&#xD;
    &#xD;
    (*Compute the first derivative*)&#xD;
    dTCdQ = D[TC[Q], Q]&#xD;
    &#xD;
    (*Solve for Q*)&#xD;
    solution = Solve[dTCdQ == 0, Q]&#xD;
    &#xD;
    (*Simplify the solution*)&#xD;
    simplifiedSolution = Simplify[solution]&#xD;
&#xD;
However, when I try to apply this to my actual equation, the program runs for hours without completing (it stuck). Here is my specific code:&#xD;
&#xD;
    TP[x_] = pr/x ((a (1 - b) (x^2))/(2 L) ((y g)/(1 + y g)))^(1/(&#xD;
       1 - b)) - ((c + 1) (Kr/x + Kp/(n x) + Kf/(&#xD;
          n x) + (hr + g)/&#xD;
           x (2 x (-((a (-1 + b) g x^2 y)/(L + g L y)))^(1/(1 - b))) + (&#xD;
           hp + g)/(2 R x) ((a (1 - b) (x^2))/(2 L) ((y g)/(1 + y g)))^(&#xD;
           2/(1 - b)) + ((hp + g) (n - 1))/(&#xD;
           2 x) ((a (1 - b) (x^2))/(2 L) ((y g)/(1 + y g)))^(2/(&#xD;
           1 - b)) (x ((a (1 - b) (x^2))/(2 L) ((y g)/(1 + y g)))^(1/(&#xD;
              1 - b)) - 1/R) + &#xD;
          1/x^2 S + (v/x + vp/(n x)) ((a (1 - b) (x^2))/(&#xD;
             2 L) ((y g)/(1 + y g)))^(1/(1 - b)) + &#xD;
          1/(n x)^2 Sv + (Pv w)/( &#xD;
           x f ww) ((a (1 - b) (x^2))/(2 L) ((y g)/(1 + y g)))^(1/(&#xD;
           1 - b)) + (((cf f) + (m bk))/( &#xD;
            x f ww ) {k n x + &#xD;
              k/q (Log[1 + z E^(-q n x)] - Log[1 + z])}) )) + (c ((pp/x + &#xD;
            pf/x) ((a (1 - b) (x^2))/(2 L) ((y g)/(1 + y g)))^(1/(&#xD;
          1 - b)))) + (pr/x l (&#xD;
         2 a g (x^2)&#xD;
           y (((-((a (-1 + b) g x^2 y)/(L + g L y)))^(1/(1 - b)))^b) )/(&#xD;
         L + g L y) - ((a (1 - b) (x^2))/(2 L) ((y g)/(1 + y g)))^(1/(&#xD;
         1 - b)) (M - x))&#xD;
    (*Compute the first derivative*)&#xD;
    dTPdx = D[TP[x], x]&#xD;
    &#xD;
    (*Solve for p*)&#xD;
    solution = Solve[dTPdx == 0, x]&#xD;
    &#xD;
    (*Simplify the solution*)&#xD;
    simplifiedSolution = Simplify[solution]&#xD;
&#xD;
Any suggestions on how to resolve this issue? Are there more efficient methods or best practices I should consider for solving complex equations in Mathematica?</description>
    <dc:creator>Shafa Hananta</dc:creator>
    <dc:date>2024-07-03T22:15:01Z</dc:date>
  </item>
  <item rdf:about="https://community.wolfram.com/groups/-/m/t/3149566">
    <title>LLM over RUSI&amp;#039;s &amp;#034;The Attritional Art of War&amp;#034; article</title>
    <link>https://community.wolfram.com/groups/-/m/t/3149566</link>
    <description>&amp;amp;[Wolfram Notebook][1]&#xD;
&#xD;
&#xD;
  [1]: https://www.wolframcloud.com/obj/9bb5c3f3-7d70-43d3-9f3c-501bb3d486fe</description>
    <dc:creator>Anton Antonov</dc:creator>
    <dc:date>2024-03-29T12:21:56Z</dc:date>
  </item>
  <item rdf:about="https://community.wolfram.com/groups/-/m/t/3055705">
    <title>Generalized Lanchester combat models</title>
    <link>https://community.wolfram.com/groups/-/m/t/3055705</link>
    <description>&amp;amp;[Wolfram Notebook][1]&#xD;
&#xD;
&#xD;
  [1]: https://www.wolframcloud.com/obj/b1da2e64-8dfd-491e-95ee-a00662e000bf</description>
    <dc:creator>Anton Antonov</dc:creator>
    <dc:date>2023-10-25T19:28:53Z</dc:date>
  </item>
  <item rdf:about="https://community.wolfram.com/groups/-/m/t/2920790">
    <title>Quantile regression 3D examples</title>
    <link>https://community.wolfram.com/groups/-/m/t/2920790</link>
    <description>&amp;amp;[Wolfram Notebook][1]&#xD;
&#xD;
&#xD;
  [1]: https://www.wolframcloud.com/obj/580db34a-30e4-447b-8db2-9df739ad718e</description>
    <dc:creator>Anton Antonov</dc:creator>
    <dc:date>2023-05-17T15:18:40Z</dc:date>
  </item>
  <item rdf:about="https://community.wolfram.com/groups/-/m/t/3050770">
    <title>[BOOK] A Field Theory of Games: Introduction to Decision Process Engineering, Volumes 1 and 2</title>
    <link>https://community.wolfram.com/groups/-/m/t/3050770</link>
    <description>![enter image description here][1]&#xD;
&#xD;
&amp;amp;[Wolfram Notebook][2]&#xD;
&#xD;
&#xD;
  [1]: https://community.wolfram.com//c/portal/getImageAttachment?filename=Leadcover.png&amp;amp;userId=20103&#xD;
  [2]: https://www.wolframcloud.com/obj/5c9309a1-88d0-4912-841b-f8dd6e23091e</description>
    <dc:creator>Gerald Thomas</dc:creator>
    <dc:date>2023-10-17T20:11:20Z</dc:date>
  </item>
  <item rdf:about="https://community.wolfram.com/groups/-/m/t/3017901">
    <title>Privatization under Political Ties</title>
    <link>https://community.wolfram.com/groups/-/m/t/3017901</link>
    <description>&amp;amp;[Wolfram Notebook][1]&#xD;
&#xD;
&#xD;
  [1]: https://www.wolframcloud.com/obj/88e82c7f-61b9-4edd-a2c6-45ef12c405b9</description>
    <dc:creator>Matteo Broso</dc:creator>
    <dc:date>2023-09-21T19:27:08Z</dc:date>
  </item>
  <item rdf:about="https://community.wolfram.com/groups/-/m/t/2997003">
    <title>The coverage planning for ground robot with constraints on the path length and time</title>
    <link>https://community.wolfram.com/groups/-/m/t/2997003</link>
    <description>![enter image description here][1]&#xD;
&#xD;
&#xD;
&#xD;
&amp;amp;[Wolfram Notebook][3]&#xD;
&#xD;
&#xD;
  [1]: https://community.wolfram.com//c/portal/getImageAttachment?filename=sdg54wdfg5w4.jpg&amp;amp;userId=11733&#xD;
  [2]: https://community.wolfram.com//c/portal/getImageAttachment?filename=7500dfgq45htwrgsfbd.jpg&amp;amp;userId=11733&#xD;
  [3]: https://www.wolframcloud.com/obj/b3fc3860-fcb7-4dd2-9add-d277e0745a33</description>
    <dc:creator>Alexander Kuznetsov</dc:creator>
    <dc:date>2023-08-23T17:49:42Z</dc:date>
  </item>
  <item rdf:about="https://community.wolfram.com/groups/-/m/t/2987807">
    <title>Facilitating Collusion with Spot-Price Contracting</title>
    <link>https://community.wolfram.com/groups/-/m/t/2987807</link>
    <description>![enter image description here][1]&#xD;
&#xD;
&amp;amp;[Wolfram Notebook][2]&#xD;
&#xD;
&#xD;
  [1]: https://community.wolfram.com//c/portal/getImageAttachment?filename=FacilitatingCollusionwithSpot-PriceContracting.png&amp;amp;userId=20103&#xD;
  [2]: https://www.wolframcloud.com/obj/5f363946-93c9-4689-a0d5-42c3628303c5</description>
    <dc:creator>John Hatfield</dc:creator>
    <dc:date>2023-08-10T14:48:47Z</dc:date>
  </item>
  <item rdf:about="https://community.wolfram.com/groups/-/m/t/2938862">
    <title>Wolfram Language conference in St. Petersburg</title>
    <link>https://community.wolfram.com/groups/-/m/t/2938862</link>
    <description>Two weeks ago (June 1st and 2nd) I participated in the Wolfram Language conference in St. Petersburg, Russia.&#xD;
Here are the corresponding announcements:&#xD;
&#xD;
- [&amp;#034;Конференция по Wolfram Language в Санкт-Петербурге&amp;#034;](https://pikabu.ru/story/konferentsiya_po_wolfram_language_v_sanktpeterburge_10203980)&#xD;
- [&amp;#034;Wolfram Language &amp;#x2014; Летняя Конференция в Санкт-Петербурге 2023&amp;#034;](https://habr.com/ru/news/732252/)&#xD;
&#xD;
The conference was co-organized by [Kirill Belov](https://community.wolfram.com/web/kirillbelovtest) and Petr Genadievich Tenishev.&#xD;
&#xD;
Here is a mind-map of the potential presentations Kirill and I discussed:&#xD;
&#xD;
![enter image description here][1]&#xD;
&#xD;
## System dynamics presentation&#xD;
&#xD;
I presented&#xD;
[&amp;#034;Динамические системы : Расширение и улучшение эпидемиологических моделей&amp;#034;](https://github.com/antononcube/SystemModeling/blob/master/Presentations/St-Petersburg-Russia-June-2023/Markdown/Dynamic-systems-and-extensions-Russian.md)&#xD;
(in English: &amp;#034;Dynamic systems: extending and improving epidemiological models&amp;#034;.)&#xD;
&#xD;
The main presentation slides had a dozen supplements:&#xD;
- [Diagrams](https://raw.githubusercontent.com/antononcube/SystemModeling/master/Presentations/St-Petersburg-Russia-June-2023/Diagrams)&#xD;
- NLP and AI utilization demos notebook&#xD;
- [Geo-spatial simulations of COVID-19 over Botswana notebook](https://github.com/antononcube/SystemModeling/blob/master/Presentations/St-Petersburg-Russia-June-2023/Markdown/COVID-19-simulations-over-Botswana-2023-Russian.md)&#xD;
&#xD;
![enter image description here][2]&#xD;
&#xD;
------&#xD;
&#xD;
## Making two presentations &#xD;
&#xD;
Interestingly, I first prepared a Latent Semantic Analysis (LSA) talk, but then found out&#xD;
that the organizers listed another talk I discussed with them, on extending dynamic systems models. &#xD;
(The latter one is the first we discussed, so, it was my &amp;#034;fault&amp;#034; that I wanted to talk about LSA.)&#xD;
&#xD;
Here are the presentation notebooks for LSA in &#xD;
[English](https://raw.githubusercontent.com/antononcube/SystemModeling/master/Presentations/St-Petersburg-Russia-June-2023/Notebooks/LSA-workflows-English.nb) and &#xD;
[Russian](https://raw.githubusercontent.com/antononcube/SystemModeling/master/Presentations/St-Petersburg-Russia-June-2023/Notebooks/LSA-workflows-Russian.nb).&#xD;
&#xD;
-------&#xD;
&#xD;
## Some afterthoughts&#xD;
&#xD;
- The conference was very &amp;#034;strong&amp;#034;, my presentation was the &amp;#034;weakest.&amp;#034;&#xD;
&#xD;
    - With &amp;#034;strong&amp;#034; I refer to the content and style of the presentations.&#xD;
&#xD;
- This was also the first scientific presentation I gave in Russian.&#xD;
I also got a [participation diploma](https://raw.githubusercontent.com/antononcube/SystemModeling/master/Presentations/St-Petersburg-Russia-June-2023/Diagrams/Diplom.jpeg).&#xD;
&#xD;
- I used [PaLMMode](https://resources.wolframcloud.com/PacletRepository/resources/AntonAntonov/PaLMMode/) &#xD;
and [NLPTemplateEngine](https://resources.wolframcloud.com/PacletRepository/resources/AntonAntonov/NLPTemplateEngine/) &#xD;
to demonstrate generation of epidemiological modeling code.&#xD;
&#xD;
- Preparing for the conference reminded me of some of the COVID-19 modeling hackathons I participated in.&#xD;
&#xD;
     - E.g. [&amp;#034;WirVsVirus&amp;#034;](https://mathematicaforprediction.wordpress.com/2020/03/24/wirvsvirus-2020-hackathon-participation/).&#xD;
&#xD;
- I prepared the initial models of artillery shells manufacturing, but much more work has to be done&#xD;
in order to have a meaningful article or presentation. (Hopefully, I am finishing that soon.)&#xD;
&#xD;
------&#xD;
&#xD;
## References&#xD;
&#xD;
### Articles, posts, presentations&#xD;
&#xD;
[AA1] Антон Антонов,&#xD;
[&amp;#034;Динамические системы : Расширение и улучшение эпидемиологических моделей&amp;#034;](./Markdown/Dynamic-systems-and-extensions-Russian.md).&#xD;
&#xD;
[AA2] Антон Антонов,&#xD;
[&amp;#034;COVID-19 modeling over Botswana&amp;#034;](./Markdown/COVID-19-simulations-over-Botswana-2023-Russian.md).&#xD;
&#xD;
[AA3] Anton Antonov,&#xD;
[&amp;#034;WirVsVirus 2020 hackathon participation&amp;#034;](https://mathematicaforprediction.wordpress.com/2020/03/24/wirvsvirus-2020-hackathon-participation/),&#xD;
(2020),&#xD;
[MathematicaForPrediction at WordPress](https://mathematicaforprediction.wordpress.com).&#xD;
&#xD;
### Videos&#xD;
&#xD;
*TBD..*&#xD;
&#xD;
&#xD;
  [1]: https://community.wolfram.com//c/portal/getImageAttachment?filename=WL-Conference-St.Petersburg-mind-map.png&amp;amp;userId=143837&#xD;
  [2]: https://community.wolfram.com//c/portal/getImageAttachment?filename=11p5r9gtpxdc9.png&amp;amp;userId=143837</description>
    <dc:creator>Anton Antonov</dc:creator>
    <dc:date>2023-06-16T14:43:28Z</dc:date>
  </item>
  <item rdf:about="https://community.wolfram.com/groups/-/m/t/220551">
    <title>Mosaic plots for data visualization</title>
    <link>https://community.wolfram.com/groups/-/m/t/220551</link>
    <description>I just published a blog post proclaiming the implementation of the function MosaicPlot that gives visual representation of the contingencies of categorical variables in a list of records. The blog post has examples and explanations:
[url=http://mathematicaforprediction.wordpress.com/2014/03/17/mosaic-plots-for-data-visualization/]http://mathematicaforprediction.wordpress.com/2014/03/17/mosaic-plots-for-data-visualization/[/url]

If we consider the census income data set known as the [url=http://archive.ics.uci.edu/ml/datasets/Census+Income]&amp;#034;adult data set&amp;#034;[/url] that is summarized in this table:

[img=width: 800px; height: 375px;]http://mathematicaforprediction.files.wordpress.com/2014/03/adult-census-income-data-summary.png[/img]

we visualize the co-occurrence of (categorical variable) values with mosaic plots like this one:

[img=width: 500px; height: 506px;]http://mathematicaforprediction.files.wordpress.com/2014/03/adult-census-income-data-sex-education-colored-mosaic-plot.png[/img]

By comparing the sizes of the rectangles corresponding to values Bachelors, Doctorate, Masters, and Some-college on the sex vs. education mosaic plot we can see that the fraction of men that have finished college is larger than the fraction of women that have finished college.
We can further subdivide the rectangles according to the co-occurrence frequencies with a third categorical variable. We are going to choose that third variable to be income, the values of which can be seen as outcomes or consequents of the values of the first two variables of the mosaic plot.

[img=width: 550px; height: 509px;]http://mathematicaforprediction.files.wordpress.com/2014/03/adult-census-income-data-sex-education-income-colored-mosaic-plot.png[/img]

From the mosaic plot &amp;#034;sex vs. education vs. income&amp;#034; we can make the following observations:

1. Approximately 75% of the males with doctorate degrees or with a professional school degree earn more than $50000 per year.

2. Approximately 60% of the females with a doctorate degree earn more than $50000 per year.

3. Approximately 45% of the females with a professional school degree earn more than $50000.

4. Across all education types females are (much) less likely to earn more than $50000 per year.</description>
    <dc:creator>Anton Antonov</dc:creator>
    <dc:date>2014-03-17T21:29:53Z</dc:date>
  </item>
  <item rdf:about="https://community.wolfram.com/groups/-/m/t/193644">
    <title>Symbolic Minimization Using the Karush-Kuhn-Tucker Conditions</title>
    <link>https://community.wolfram.com/groups/-/m/t/193644</link>
    <description>The following code does symbolic minimization using Reduce to solve the Karush-Kuhn-Tucker conditions.  No sign conditions are placed on the Lagrange multipliers, so it gives all maxima and minima.  The subscripts of the multipliers are the constraint they are multiplying.[mcode]consconvrule = {
   x_ &amp;gt;= y_ -&amp;gt; y - x,
   x_ == y_ -&amp;gt; x - y ,
   x_ &amp;lt;= y_ -&amp;gt; x - y,
   lb_ &amp;lt;= x_ &amp;lt;= ub_ -&amp;gt; (x - lb) (x - ub),
   ub_ &amp;gt;= x_ &amp;gt;= lb_ -&amp;gt; (x - lb) (x - ub)
   };

KKT[obj_, cons_List, vars_List] :=
 Module[
  {convcons, gradobj, gradcons, \[CapitalLambda]},
  convcons = (cons /. consconvrule);
  {gradobj, gradcons} = D[{obj, convcons}, {vars}];
  \[CapitalLambda] = Subscript[\[Lambda], #] &amp;amp; /@ cons;
  LogicalExpand @ Reduce[
    Flatten[{
      Thread[gradobj == \[CapitalLambda].gradcons],
      Thread[\[CapitalLambda]*convcons == 0] /. 
       Subscript[\[Lambda], Equal[a_, b_]] -&amp;gt; 0,
      cons,
      {objval == obj}
      }],
    Join[{objval}, vars, \[CapitalLambda]],
    Reals,
    Backsubstitution -&amp;gt; True
    ]
  ][/mcode]Here&amp;#039;s an example application[mcode]KKT[x + 2 y + 3 z, {x^2 + y^2 + z^2 == 1, 3 x + 2 y + z &amp;lt;= 1}, {x, y, 
  z}][/mcode][mcode](objval == -Sqrt[14] &amp;amp;&amp;amp; x == -(1/Sqrt[14]) &amp;amp;&amp;amp; y == -Sqrt[(2/7)] &amp;amp;&amp;amp; 
   z == -(3/Sqrt[14]) &amp;amp;&amp;amp; 
   Subscript[\[Lambda], x^2 + y^2 + z^2 == 1] == -Sqrt[(7/2)] &amp;amp;&amp;amp; 
   Subscript[\[Lambda], 3 x + 2 y + z &amp;lt;= 1] == 0) || 
(objval == 
    1/7 (5 - 2 Sqrt[78]) &amp;amp;&amp;amp; x == 1/42 (9 + 2 Sqrt[78]) &amp;amp;&amp;amp; 
   y == 1/42 (6 - Sqrt[78]) &amp;amp;&amp;amp; z == 1/42 (3 - 4 Sqrt[78]) &amp;amp;&amp;amp; 
   Subscript[\[Lambda], x^2 + y^2 + z^2 == 1] == -2 Sqrt[6/13] &amp;amp;&amp;amp; 
   Subscript[\[Lambda], 3 x + 2 y + z &amp;lt;= 1] == 
    1/91 (65 + 2 Sqrt[78])) || 
(objval == 1/7 (5 + 2 Sqrt[78]) &amp;amp;&amp;amp; 
   x == 1/42 (9 - 2 Sqrt[78]) &amp;amp;&amp;amp; y == 1/42 (6 + Sqrt[78]) &amp;amp;&amp;amp; 
   z == 1/42 (3 + 4 Sqrt[78]) &amp;amp;&amp;amp; 
   Subscript[\[Lambda], x^2 + y^2 + z^2 == 1] == 2 Sqrt[6/13] &amp;amp;&amp;amp; 
   Subscript[\[Lambda], 3 x + 2 y + z &amp;lt;= 1] == 1/91 (65 - 2 Sqrt[78]))[/mcode]</description>
    <dc:creator>Frank Kampas</dc:creator>
    <dc:date>2014-01-31T16:15:31Z</dc:date>
  </item>
</rdf:RDF>

