{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "f8b16455-d7c0-4d36-a922-5303932bf771",
   "metadata": {},
   "source": [
    "### Acquiring Ground Truth and LLMs annotations"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "b08d84b1-4f98-462c-ad55-d380c6bb3224",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "\n",
    "results_path = 'docs/Results/'\n",
    "\n",
    "models = ['Claude', 'DSChatbot', 'GPT', 'Qwen']\n",
    "\n",
    "results = {}\n",
    "for model in models:   \n",
    "    \n",
    "    xls = pd.ExcelFile(results_path+model+'_Cohens.xlsx')\n",
    "    \n",
    "    if model == models[0]:\n",
    "        gt = pd.read_excel(xls, 'GT')\n",
    "        gt = gt.drop(['Unnamed: 0', 'Annotator', 'Code', 'Please elaborate your reason of your answers in the previous two questions', 'Conf', 'Word Counts mod 10', 'Who Did it',\t'Who checked',\t'Checker comments',\t'Is conflict?',\t'Who resolve conflict'], axis=1)\n",
    "        gt = gt.fillna(0)\n",
    "        \n",
    "    results[model] = []\n",
    "    for p in range(4):\n",
    "        results[model].append(pd.read_excel(xls, model+'-p'+str(p)))\n",
    "        results[model][p] = results[model][p].drop(['Annotator', 'Code', 'Please elaborate your reason of your answers in the previous two questions'], axis=1)\n",
    "        results[model][p] = results[model][p].fillna(0)"
   ]
  },
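  {
   "cell_type": "markdown",
   "id": "3f9a2b1c-7d4e-4c8a-9b6f-2a1e5d7c8f90",
   "metadata": {},
   "source": [
    "A quick consistency sketch: the metric loops below index the ground truth by each results sheet's columns and zip rows pairwise, so they implicitly assume matching feature columns and row counts. The assertions below make those assumptions explicit."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8c1d4e2f-5a6b-4c7d-8e9f-0a1b2c3d4e5f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity check (a sketch): every model sheet should expose the same feature\n",
    "# columns and the same number of rows as the ground truth after the drops.\n",
    "for model in models:\n",
    "    for p in range(4):\n",
    "        assert sorted(results[model][p].columns) == sorted(gt.columns), (model, p)\n",
    "        assert len(results[model][p]) == len(gt), (model, p)"
   ]
  },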
  {
   "cell_type": "markdown",
   "id": "0b16a3ab-6cd1-424e-ab75-fe99e3d1e652",
   "metadata": {
    "jp-MarkdownHeadingCollapsed": true
   },
   "source": [
    "### Computing True Positives, True Negatives, False Positives and False Negatives for each model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "id": "5c74f77b-2932-49e4-9f89-63a7e5d7a1d2",
   "metadata": {},
   "outputs": [],
   "source": [
    "tp = {}\n",
    "fp = {}\n",
    "tn = {}\n",
    "fn = {}\n",
    "for model in models:\n",
    "    tp[model] = []\n",
    "    fp[model] = []\n",
    "    tn[model] = []\n",
    "    fn[model] = []\n",
    "    for p in range(4):\n",
    "        tp[model].append({})\n",
    "        fp[model].append({})\n",
    "        tn[model].append({})\n",
    "        fn[model].append({})\n",
    "        for (columnName, columnData) in results[model][p].items():\n",
    "            columnGT = list(gt[columnName])\n",
    "            tp[model][p][columnName] = len([1 for a, b, in zip(columnData, columnGT) if a == 1 and b == 1])\n",
    "            fp[model][p][columnName] = len([1 for a, b, in zip(columnData, columnGT) if a == 1 and b == 0])\n",
    "            tn[model][p][columnName] = len([1 for a, b, in zip(columnData, columnGT) if a == 0 and b == 0])\n",
    "            fn[model][p][columnName] = len([1 for a, b, in zip(columnData, columnGT) if a == 0 and b == 1])"
   ]
  },
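  {
   "cell_type": "markdown",
   "id": "d4c3b2a1-9e8f-4a5b-8c7d-6e5f4a3b2c1d",
   "metadata": {},
   "source": [
    "As an optional cross-check (assuming scikit-learn is available and all labels are 0/1), `sklearn.metrics.confusion_matrix` should reproduce the hand-rolled counts: with the ground truth as the first argument and `labels=[0, 1]`, its flattened output is `(tn, fp, fn, tp)`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7e6f5a4b-3c2d-4e1f-9a8b-7c6d5e4f3a2b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional cross-check of the counts above (assumes scikit-learn, 0/1 labels).\n",
    "from sklearn.metrics import confusion_matrix\n",
    "\n",
    "model, p = models[0], 0  # spot check on one model/prompt pair\n",
    "for (columnName, columnData) in results[model][p].items():\n",
    "    tn_, fp_, fn_, tp_ = confusion_matrix(gt[columnName], columnData,\n",
    "                                          labels=[0, 1]).ravel()\n",
    "    assert (tp_, fp_, tn_, fn_) == (tp[model][p][columnName], fp[model][p][columnName],\n",
    "                                    tn[model][p][columnName], fn[model][p][columnName])"
   ]
  },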
  {
   "cell_type": "markdown",
   "id": "24fe8be5-121f-45b5-b628-e59be71cf523",
   "metadata": {},
   "source": [
    "### Computing statistics per LLM, per feature, but across prompts and models"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "id": "ed24f16f-f3fd-4102-9aaf-3773bb5172d9",
   "metadata": {},
   "outputs": [],
   "source": [
    "prompts_precision = []\n",
    "for p in range(4):\n",
    "    prompt_precision = []\n",
    "    for model in models:\n",
    "        model_precision = []\n",
    "        for (columnName, columnData) in gt.items():\n",
    "            if tp[model][p][columnName]+fp[model][p][columnName]>0:\n",
    "                model_precision.append(tp[model][p][columnName]/(tp[model][p][columnName]+fp[model][p][columnName]))\n",
    "            else:\n",
    "                model_precision.append(0)\n",
    "        prompt_precision.append(sum(model_precision)/len(model_precision))\n",
    "    prompts_precision.append(sum(prompt_precision)/len(prompt_precision))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "id": "5043ccf2-dded-4561-9ab8-b82584e9aa69",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.6267043646953181, 0.6708933806921297, 0.5939737393123813, 0.680463087217283]"
      ]
     },
     "execution_count": 55,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "prompts_precision"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "id": "16192d52-3085-4217-bff6-9c6f95b7235b",
   "metadata": {},
   "outputs": [],
   "source": [
    "prompts_recall = []\n",
    "for p in range(4):\n",
    "    prompt_recall = []\n",
    "    for model in models:\n",
    "        model_recall = []\n",
    "        for (columnName, columnData) in gt.items():\n",
    "            if tp[model][p][columnName]+fn[model][p][columnName]>0:\n",
    "                model_recall.append(tp[model][p][columnName]/(tp[model][p][columnName]+fn[model][p][columnName]))\n",
    "            else:\n",
    "                model_recall.append(0)\n",
    "        prompt_recall.append(sum(model_recall)/len(model_recall))\n",
    "    prompts_recall.append(sum(prompt_recall)/len(prompt_recall))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "id": "61d9b88d-882c-4e00-8c2d-444180ac1a71",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.5066713361418673,\n",
       " 0.5567469914586984,\n",
       " 0.5352606163927222,\n",
       " 0.5847120079221494]"
      ]
     },
     "execution_count": 57,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "prompts_recall"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "id": "73b42af3-279e-472b-914b-71a8651b3895",
   "metadata": {},
   "outputs": [],
   "source": [
    "prompts_accuracy = []\n",
    "for p in range(4):\n",
    "    prompt_accuracy = []\n",
    "    for model in models:\n",
    "        model_accuracy = []\n",
    "        for (columnName, columnData) in gt.items():\n",
    "            correct = tp[model][p][columnName]+tn[model][p][columnName]\n",
    "            total = tp[model][p][columnName]+fp[model][p][columnName]+tn[model][p][columnName]+fn[model][p][columnName]\n",
    "            model_accuracy.append(correct/total)\n",
    "        prompt_accuracy.append(sum(model_accuracy)/len(model_accuracy))\n",
    "    prompts_accuracy.append(sum(prompt_accuracy)/len(prompt_accuracy))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "id": "542b15f7-aaf9-4891-ba36-7cf5f6889b4a",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.7817540939700031,\n",
       " 0.7859886746250382,\n",
       " 0.7626023492500765,\n",
       " 0.7894484618916436]"
      ]
     },
     "execution_count": 59,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "prompts_accuracy"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "id": "61487b2a-7568-488c-96ef-d8d6ad64cb37",
   "metadata": {},
   "outputs": [],
   "source": [
    "prompts_k = []\n",
    "for p in range(4):\n",
    "    prompt_k = []\n",
    "    for model in models:\n",
    "        model_k = []\n",
    "        for (columnName, columnData) in gt.items():\n",
    "            correct = tp[model][p][columnName]+tn[model][p][columnName]\n",
    "            alerts = tp[model][p][columnName]+fp[model][p][columnName]\n",
    "            positive = tp[model][p][columnName]+fn[model][p][columnName]\n",
    "            total = tp[model][p][columnName]+fp[model][p][columnName]+tn[model][p][columnName]+fn[model][p][columnName]\n",
    "            accuracy = correct/total\n",
    "            expected_accuracy = alerts/total * positive/total + (1 - alerts/total) * (1 - positive/total)\n",
    "            model_k.append((accuracy - expected_accuracy) / (1 - expected_accuracy))\n",
    "        prompt_k.append(sum(model_k)/len(model_k))\n",
    "    prompts_k.append(sum(prompt_k)/len(prompt_k))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "id": "a3ba7b73-0b9f-4a76-8044-6575e0294f45",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.3659740704930856,\n",
       " 0.41534997363663834,\n",
       " 0.34672365030593855,\n",
       " 0.4228619287565822]"
      ]
     },
     "execution_count": 61,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "prompts_k"
   ]
  },
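  {
   "cell_type": "markdown",
   "id": "1a2b3c4d-5e6f-4a7b-8c9d-0e1f2a3b4c5d",
   "metadata": {},
   "source": [
    "The closed-form computation above is the standard two-rater Cohen's kappa for binary labels, so (assuming scikit-learn is installed) `sklearn.metrics.cohen_kappa_score` on the raw label columns should agree with a per-feature value up to floating-point error."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9b8a7c6d-5e4f-4a3b-8c2d-1e0f9a8b7c6d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional cross-check of a single per-feature kappa (assumes scikit-learn).\n",
    "from sklearn.metrics import cohen_kappa_score\n",
    "\n",
    "model, p = models[0], 0    # spot check on one model/prompt pair\n",
    "col = list(gt.columns)[0]  # and on the first feature column\n",
    "print(cohen_kappa_score(gt[col], results[model][p][col]))"
   ]
  },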
  {
   "cell_type": "markdown",
   "id": "28e86c58-d962-4be0-90c5-1e6a245f71cb",
   "metadata": {},
   "source": [
    "### Computing Cohen's kappa for each column, across different LLMs. Grouping per prompt and comparing the results with Wilcoxon's tests"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "id": "6c9e8c95-2173-4b19-8c5d-06e53c0cff1a",
   "metadata": {},
   "outputs": [],
   "source": [
    "prompts_k = [[],[],[],[]]\n",
    "for p in range(4):\n",
    "    prompt_k = []\n",
    "    for model in models:\n",
    "        model_k = []\n",
    "        for (columnName, columnData) in gt.items():\n",
    "            correct = tp[model][p][columnName]+tn[model][p][columnName]\n",
    "            alerts = tp[model][p][columnName]+fp[model][p][columnName]\n",
    "            positive = tp[model][p][columnName]+fn[model][p][columnName]\n",
    "            total = tp[model][p][columnName]+fp[model][p][columnName]+tn[model][p][columnName]+fn[model][p][columnName]\n",
    "            accuracy = correct/total\n",
    "            expected_accuracy = alerts/total * positive/total + (1 - alerts/total) * (1 - positive/total)\n",
    "            prompts_k[p].append((accuracy - expected_accuracy) / (1 - expected_accuracy))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "id": "248cb599-f4b8-4f31-a9c4-f1a4e2b22632",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WilcoxonResult(statistic=181.0, pvalue=0.07542865828732438)\n",
      "WilcoxonResult(statistic=181.0, pvalue=0.9622856708563378)\n",
      "WilcoxonResult(statistic=181.0, pvalue=0.03771432914366219)\n"
     ]
    }
   ],
   "source": [
    "from scipy.stats import wilcoxon\n",
    "\n",
    "print(wilcoxon(prompts_k[0], prompts_k[1], alternative='two-sided'))\n",
    "print(wilcoxon(prompts_k[0], prompts_k[1], alternative='greater'))\n",
    "print(wilcoxon(prompts_k[0], prompts_k[1], alternative='less'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "id": "fb5e9d53-4f16-4814-ab62-9150020b24b9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WilcoxonResult(statistic=173.0, pvalue=0.22102157481330342)\n",
      "WilcoxonResult(statistic=292.0, pvalue=0.11051078740665171)\n",
      "WilcoxonResult(statistic=292.0, pvalue=0.8894892125933482)\n"
     ]
    }
   ],
   "source": [
    "from scipy.stats import wilcoxon\n",
    "\n",
    "print(wilcoxon(prompts_k[1], prompts_k[2], alternative='two-sided'))\n",
    "print(wilcoxon(prompts_k[1], prompts_k[2], alternative='greater'))\n",
    "print(wilcoxon(prompts_k[1], prompts_k[2], alternative='less'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "id": "d57f6050-3d22-49b9-950f-b8621ed63923",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WilcoxonResult(statistic=30.0, pvalue=0.008903385115881386)\n",
      "WilcoxonResult(statistic=30.0, pvalue=0.9955483074420594)\n",
      "WilcoxonResult(statistic=30.0, pvalue=0.004451692557940693)\n"
     ]
    }
   ],
   "source": [
    "from scipy.stats import wilcoxon\n",
    "\n",
    "print(wilcoxon(prompts_k[2], prompts_k[3], alternative='two-sided'))\n",
    "print(wilcoxon(prompts_k[2], prompts_k[3], alternative='greater'))\n",
    "print(wilcoxon(prompts_k[2], prompts_k[3], alternative='less'))"
   ]
  },
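  {
   "cell_type": "markdown",
   "id": "5c4d3e2f-1a0b-4c9d-8e7f-6a5b4c3d2e1f",
   "metadata": {},
   "source": [
    "The three cells above repeat the same pattern; a compact equivalent (same `scipy.stats.wilcoxon` calls, identical results) loops over the adjacent prompt pairs:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2e1f0a9b-8c7d-4e6f-9a5b-4c3d2e1f0a9b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# One-cell equivalent of the three pairwise prompt comparisons above.\n",
    "from scipy.stats import wilcoxon\n",
    "\n",
    "for a, b in [(0, 1), (1, 2), (2, 3)]:\n",
    "    for alt in ('two-sided', 'greater', 'less'):\n",
    "        print(f'p{a} vs p{b} ({alt}):', wilcoxon(prompts_k[a], prompts_k[b], alternative=alt))"
   ]
  },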
  {
   "cell_type": "markdown",
   "id": "eda73101-1d85-43a0-9930-1be025fce6f6",
   "metadata": {},
   "source": [
    "### Computing statistics for prompt 3+ per model, across columns"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 78,
   "id": "3ce9c8d9-f819-4805-9087-6a6833cdc014",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Claude 0.5711558173961467\n",
      "DSChatbot 0.7920066360944336\n",
      "GPT 0.6178278539522558\n",
      "Qwen 0.7408620414262959\n"
     ]
    }
   ],
   "source": [
    "models_precision = []\n",
    "for model in models:\n",
    "    model_precision = []\n",
    "    for (columnName, columnData) in gt.items():\n",
    "        if tp[model][3][columnName]+fp[model][3][columnName]>0:\n",
    "            model_precision.append(tp[model][3][columnName]/(tp[model][3][columnName]+fp[model][3][columnName]))\n",
    "        else:\n",
    "            model_precision.append(0)\n",
    "    models_precision.append(sum(model_precision)/len(model_precision))\n",
    "    print(model, models_precision[-1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 73,
   "id": "1fd67852-a48f-47e5-a19d-f05cfe732673",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Claude 0.507255519149732\n",
      "DSChatbot 0.6367532631254493\n",
      "GPT 0.4657223530685516\n",
      "Qwen 0.7291168963448645\n"
     ]
    }
   ],
   "source": [
    "models_recall = []\n",
    "for model in models:\n",
    "    model_recall = []\n",
    "    for (columnName, columnData) in gt.items():\n",
    "        if tp[model][3][columnName]+fp[model][3][columnName]>0:\n",
    "            model_recall.append(tp[model][3][columnName]/(tp[model][3][columnName]+fn[model][3][columnName]))\n",
    "        else:\n",
    "            model_recall.append(0)\n",
    "    models_recall.append(sum(model_recall)/len(model_recall))\n",
    "    print(model, models_recall[-1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 74,
   "id": "82945058-4269-4f44-8e25-a4894eb87d5d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Claude 0.7355563207835936\n",
      "DSChatbot 0.828527701254974\n",
      "GPT 0.7351354453627181\n",
      "Qwen 0.8585743801652891\n"
     ]
    }
   ],
   "source": [
    "models_accuracy = []\n",
    "p=3\n",
    "for model in models:\n",
    "    model_accuracy = []\n",
    "    for (columnName, columnData) in gt.items():\n",
    "        correct = tp[model][p][columnName]+tn[model][p][columnName]\n",
    "        total = tp[model][p][columnName]+fp[model][p][columnName]+tn[model][p][columnName]+fn[model][p][columnName]\n",
    "        model_accuracy.append(correct/total)\n",
    "    models_accuracy.append(sum(model_accuracy)/len(model_accuracy))\n",
    "    print(model, models_accuracy[-1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "id": "34a2f0d4-cab1-48b0-afbd-2a9a4e002098",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Claude 0.2963876165367914\n",
      "DSChatbot 0.5261933521991904\n",
      "GPT 0.25869180114620494\n",
      "Qwen 0.6101749451441421\n"
     ]
    }
   ],
   "source": [
    "models_k = []\n",
    "p=3\n",
    "for model in models:\n",
    "    model_k = []\n",
    "    for (columnName, columnData) in gt.items():\n",
    "        correct = tp[model][p][columnName]+tn[model][p][columnName]\n",
    "        alerts = tp[model][p][columnName]+fp[model][p][columnName]\n",
    "        positive = tp[model][p][columnName]+fn[model][p][columnName]\n",
    "        total = tp[model][p][columnName]+fp[model][p][columnName]+tn[model][p][columnName]+fn[model][p][columnName]\n",
    "        accuracy = correct/total\n",
    "        expected_accuracy = alerts/total * positive/total + (1 - alerts/total) * (1 - positive/total)\n",
    "        model_k.append((accuracy - expected_accuracy) / (1 - expected_accuracy))\n",
    "    models_k.append(sum(model_k)/len(model_k))\n",
    "    print(model, models_k[-1])"
   ]
  },
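  {
   "cell_type": "markdown",
   "id": "6f5e4d3c-2b1a-4c0d-9e8f-7a6b5c4d3e2f",
   "metadata": {},
   "source": [
    "The four per-model cells above repeat the same arithmetic; a small helper (a sketch, not the original code) derives all four statistics for one model, prompt, and feature from the stored counts:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0a9b8c7d-6e5f-4a4b-8c3d-2e1f0a9b8c7d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Consolidated per-feature metrics from the confusion counts (a sketch).\n",
    "def column_metrics(model, p, col):\n",
    "    tp_, fp_ = tp[model][p][col], fp[model][p][col]\n",
    "    tn_, fn_ = tn[model][p][col], fn[model][p][col]\n",
    "    total = tp_ + fp_ + tn_ + fn_\n",
    "    precision = tp_ / (tp_ + fp_) if tp_ + fp_ > 0 else 0\n",
    "    recall = tp_ / (tp_ + fn_) if tp_ + fn_ > 0 else 0\n",
    "    accuracy = (tp_ + tn_) / total\n",
    "    pe = (((tp_ + fp_) / total) * ((tp_ + fn_) / total)\n",
    "          + (1 - (tp_ + fp_) / total) * (1 - (tp_ + fn_) / total))\n",
    "    kappa = (accuracy - pe) / (1 - pe)\n",
    "    return precision, recall, accuracy, kappa\n",
    "\n",
    "print(column_metrics(models[0], 3, list(gt.columns)[0]))  # example call"
   ]
  },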
  {
   "cell_type": "markdown",
   "id": "8b76663b-bd19-4ff9-9555-e7fa7f77af12",
   "metadata": {},
   "source": [
    "### Compare each model performance for each prompt using Wilcoxon tests on the single columns Cohen's kappa, across different annotators"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "id": "5ba5151f-6812-4277-bf2e-5d420de0a4fb",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Claude 01 WilcoxonResult(statistic=9.0, pvalue=0.064453125)\n",
      "Claude 12 WilcoxonResult(statistic=31.0, pvalue=0.849609375)\n",
      "Claude 13 WilcoxonResult(statistic=16.0, pvalue=0.421875)\n",
      "DSChatbot 01 WilcoxonResult(statistic=12.0, pvalue=0.23046875)\n",
      "DSChatbot 12 WilcoxonResult(statistic=3.0, pvalue=0.625)\n",
      "DSChatbot 13 WilcoxonResult(statistic=0.0, pvalue=0.5)\n",
      "GPT 01 WilcoxonResult(statistic=3.0, pvalue=0.0390625)\n",
      "GPT 12 WilcoxonResult(statistic=45.0, pvalue=1.0)\n",
      "GPT 13 WilcoxonResult(statistic=0.0, pvalue=0.00390625)\n",
      "Qwen 01 WilcoxonResult(statistic=28.0, pvalue=0.751953125)\n",
      "Qwen 12 WilcoxonResult(statistic=0.0, pvalue=0.001953125)\n",
      "Qwen 13 WilcoxonResult(statistic=3.0, pvalue=1.0)\n"
     ]
    }
   ],
   "source": [
    "from scipy.stats import wilcoxon\n",
    "\n",
    "for model in models:\n",
    "    model_k = []\n",
    "    for p in range(4):\n",
    "        prompt_k = []\n",
    "        for (columnName, columnData) in gt.items():\n",
    "            correct = tp[model][p][columnName]+tn[model][p][columnName]\n",
    "            alerts = tp[model][p][columnName]+fp[model][p][columnName]\n",
    "            positive = tp[model][p][columnName]+fn[model][p][columnName]\n",
    "            total = tp[model][p][columnName]+fp[model][p][columnName]+tn[model][p][columnName]+fn[model][p][columnName]\n",
    "            accuracy = correct/total\n",
    "            expected_accuracy = alerts/total * positive/total + (1 - alerts/total) * (1 - positive/total)\n",
    "            prompt_k.append((accuracy - expected_accuracy) / (1 - expected_accuracy))\n",
    "        model_k.append(prompt_k)\n",
    "    print(model, \"01\", wilcoxon(model_k[0], model_k[1], alternative='less'))\n",
    "    print(model, \"12\", wilcoxon(model_k[1], model_k[2], alternative='less'))\n",
    "    print(model, \"13\", wilcoxon(model_k[2], model_k[3], alternative='less'))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "conda_python3",
   "language": "python",
   "name": "conda_python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
