Commit 399d77d3dc57027ebca78e9b57540afd0f49b9ca

Authored by dsotofor
1 parent 3e9c3b6318
Exists in main

another version

Showing 9 changed files with 97 additions and 54 deletions Inline Diff

chapters/CBR.aux View file @ 399d77d
\relax 1 1 \relax
\providecommand\hyper@newdestlabel[2]{} 2 2 \providecommand\hyper@newdestlabel[2]{}
\citation{schank+abelson77} 3 3 \citation{schank+abelson77}
\citation{KOLODNER1983281} 4 4 \citation{KOLODNER1983281}
\citation{Riesbeck1989} 5 5 \citation{Riesbeck1989}
\citation{JUNG20095695} 6 6 \citation{JUNG20095695}
\@writefile{toc}{\contentsline {chapter}{\numberline {4}\'Etat de l'art (Raisonnement à Partir de Cas)}{29}{chapter.4}\protected@file@percent } 7 7 \@writefile{toc}{\contentsline {chapter}{\numberline {4}\'Etat de l'art (Raisonnement à Partir de Cas)}{29}{chapter.4}\protected@file@percent }
\@writefile{lof}{\addvspace {10\p@ }} 8 8 \@writefile{lof}{\addvspace {10\p@ }}
\@writefile{lot}{\addvspace {10\p@ }} 9 9 \@writefile{lot}{\addvspace {10\p@ }}
\@writefile{toc}{\contentsline {section}{\numberline {4.1}Raisonnement à partir de cas (RàPC)}{29}{section.4.1}\protected@file@percent } 10 10 \@writefile{toc}{\contentsline {section}{\numberline {4.1}Raisonnement à partir de cas (RàPC)}{29}{section.4.1}\protected@file@percent }
\citation{10.1007/978-3-642-15973-2_50} 11 11 \citation{10.1007/978-3-642-15973-2_50}
\citation{PETROVIC201617} 12 12 \citation{PETROVIC201617}
\citation{wolf2024keep} 13 13 \citation{wolf2024keep}
\citation{PAREJASLLANOVARCED2024111469} 14 14 \citation{PAREJASLLANOVARCED2024111469}
\citation{10.1007/978-3-319-47096-2_11} 15 15 \citation{10.1007/978-3-319-47096-2_11}
\citation{10.1007/978-3-642-15973-2_50} 16 16 \citation{10.1007/978-3-642-15973-2_50}
\citation{Robertson2014ARO} 17 17 \citation{Robertson2014ARO}
\citation{ROLDANREYES20151} 18 18 \citation{ROLDANREYES20151}
\citation{10.1007/978-3-319-47096-2_11} 19 19 \citation{10.1007/978-3-319-47096-2_11}
\citation{10.1007/978-3-319-61030-6_1} 20 20 \citation{10.1007/978-3-319-61030-6_1}
\citation{Muller} 21 21 \citation{Muller}
\citation{10.1007/978-3-319-24586-7_20} 22 22 \citation{10.1007/978-3-319-24586-7_20}
\citation{10.1007/978-3-030-58342-2_20} 23 23 \citation{10.1007/978-3-030-58342-2_20}
\citation{10.1007/978-3-030-01081-2_25} 24 24 \citation{10.1007/978-3-030-01081-2_25}
\citation{10.1007/978-3-030-58342-2_8} 25 25 \citation{10.1007/978-3-030-58342-2_8}
\citation{10.1007/978-3-030-58342-2_5} 26 26 \citation{10.1007/978-3-030-58342-2_5}
\citation{8495930} 27 27 \citation{8495930}
\citation{Obeid} 28 28 \citation{Obeid}
29 \citation{buildings13030651}
30 \citation{YU2023110163}
\citation{ROLDANREYES20151} 29 31 \citation{ROLDANREYES20151}
\citation{ROLDANREYES20151} 30 32 \citation{ROLDANREYES20151}
\citation{Obeid} 31 33 \citation{Obeid}
\citation{Obeid} 32 34 \citation{Obeid}
\citation{10.1007/978-3-319-47096-2_11} 33 35 \citation{10.1007/978-3-319-47096-2_11}
\citation{10.1007/978-3-319-47096-2_11} 34 36 \citation{10.1007/978-3-319-47096-2_11}
\citation{HU2025127130} 35 37 \citation{HU2025127130}
\citation{ALABDULRAHMAN2021114061} 36 38 \citation{ALABDULRAHMAN2021114061}
\@writefile{lof}{\contentsline {figure}{\numberline {4.1}{\ignorespaces Cycle du RàPC modifié. (Traduit de \cite {ROLDANREYES20151})\relax }}{34}{figure.caption.13}\protected@file@percent } 37 39 \@writefile{lof}{\contentsline {figure}{\numberline {4.1}{\ignorespaces Cycle du RàPC modifié. (Traduit de \cite {ROLDANREYES20151})\relax }}{34}{figure.caption.13}\protected@file@percent }
\newlabel{fig:figMCBR1}{{4.1}{34}{Cycle du RàPC modifié. (Traduit de \cite {ROLDANREYES20151})\relax }{figure.caption.13}{}} 38 40 \newlabel{fig:figMCBR1}{{4.1}{34}{Cycle du RàPC modifié. (Traduit de \cite {ROLDANREYES20151})\relax }{figure.caption.13}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {4.2}{\ignorespaces Taxonomie des techniques algorithmiques employées pour des modules de recommandation dans les EIAH (Traduit de \cite {Obeid})\relax }}{34}{figure.caption.14}\protected@file@percent } 39 41 \@writefile{lof}{\contentsline {figure}{\numberline {4.2}{\ignorespaces Taxonomie des techniques algorithmiques employées pour des modules de recommandation dans les EIAH (Traduit de \cite {Obeid})\relax }}{34}{figure.caption.14}\protected@file@percent }
\newlabel{fig:figTax}{{4.2}{34}{Taxonomie des techniques algorithmiques employées pour des modules de recommandation dans les EIAH (Traduit de \cite {Obeid})\relax }{figure.caption.14}{}} 40 42 \newlabel{fig:figTax}{{4.2}{34}{Taxonomie des techniques algorithmiques employées pour des modules de recommandation dans les EIAH (Traduit de \cite {Obeid})\relax }{figure.caption.14}{}}
\citation{JUNG20095695} 41 43 \citation{JUNG20095695}
\citation{10.1007/978-3-642-15973-2_50} 42 44 \citation{10.1007/978-3-642-15973-2_50}
\citation{PETROVIC201617} 43 45 \citation{PETROVIC201617}
\citation{ROLDANREYES20151} 44 46 \citation{ROLDANREYES20151}
\citation{10.1007/978-3-319-47096-2_11} 45 47 \citation{10.1007/978-3-319-47096-2_11}
\citation{10.1007/978-3-319-61030-6_1} 46 48 \citation{10.1007/978-3-319-61030-6_1}
\citation{Muller} 47 49 \citation{Muller}
\citation{10.1007/978-3-319-24586-7_20} 48 50 \citation{10.1007/978-3-319-24586-7_20}
\citation{10.1007/978-3-030-58342-2_20} 49 51 \citation{10.1007/978-3-030-58342-2_20}
\citation{10.1007/978-3-030-01081-2_25} 50 52 \citation{10.1007/978-3-030-01081-2_25}
\citation{10.1007/978-3-030-58342-2_8} 51 53 \citation{10.1007/978-3-030-58342-2_8}
\citation{10.1007/978-3-030-58342-2_5} 52 54 \citation{10.1007/978-3-030-58342-2_5}
\citation{8495930} 53 55 \citation{8495930}
\citation{Obeid} 54 56 \citation{Obeid}
\@writefile{lof}{\contentsline {figure}{\numberline {4.3}{\ignorespaces Ajout d'un cycle complémentaire avec \textit {Deep Learning} au RàPC (Traduit de \cite {10.1007/978-3-319-47096-2_11})\relax }}{35}{figure.caption.15}\protected@file@percent } 55 57 \@writefile{lof}{\contentsline {figure}{\numberline {4.3}{\ignorespaces Ajout d'un cycle complémentaire avec \textit {Deep Learning} au RàPC (Traduit de \cite {10.1007/978-3-319-47096-2_11})\relax }}{35}{figure.caption.15}\protected@file@percent }
\newlabel{fig:figMCBR2}{{4.3}{35}{Ajout d'un cycle complémentaire avec \textit {Deep Learning} au RàPC (Traduit de \cite {10.1007/978-3-319-47096-2_11})\relax }{figure.caption.15}{}} 56 58 \newlabel{fig:figMCBR2}{{4.3}{35}{Ajout d'un cycle complémentaire avec \textit {Deep Learning} au RàPC (Traduit de \cite {10.1007/978-3-319-47096-2_11})\relax }{figure.caption.15}{}}
\@writefile{lot}{\contentsline {table}{\numberline {4.1}{\ignorespaces Tableau de synthèse des articles analysés dans l’état de l’art du RàPC\relax }}{36}{table.caption.16}\protected@file@percent } 57 59 \@writefile{lot}{\contentsline {table}{\numberline {4.1}{\ignorespaces Tableau de synthèse des articles analysés dans l’état de l’art du RàPC\relax }}{36}{table.caption.16}\protected@file@percent }
\newlabel{tabArts2}{{4.1}{36}{Tableau de synthèse des articles analysés dans l’état de l’art du RàPC\relax }{table.caption.16}{}} 58 60 \newlabel{tabArts2}{{4.1}{36}{Tableau de synthèse des articles analysés dans l’état de l’art du RàPC\relax }{table.caption.16}{}}
\@setckpt{./chapters/CBR}{ 59 61 \@setckpt{./chapters/CBR}{
\setcounter{page}{37} 60 62 \setcounter{page}{37}
\setcounter{equation}{0} 61 63 \setcounter{equation}{0}
\setcounter{enumi}{0} 62 64 \setcounter{enumi}{0}
\setcounter{enumii}{0} 63 65 \setcounter{enumii}{0}
\setcounter{enumiii}{0} 64 66 \setcounter{enumiii}{0}
\setcounter{enumiv}{0} 65 67 \setcounter{enumiv}{0}
\setcounter{footnote}{0} 66 68 \setcounter{footnote}{0}
\setcounter{mpfootnote}{0} 67 69 \setcounter{mpfootnote}{0}
\setcounter{part}{2} 68 70 \setcounter{part}{2}
\setcounter{chapter}{4} 69 71 \setcounter{chapter}{4}
\setcounter{section}{1} 70 72 \setcounter{section}{1}
\setcounter{subsection}{0} 71 73 \setcounter{subsection}{0}
\setcounter{subsubsection}{0} 72 74 \setcounter{subsubsection}{0}
\setcounter{paragraph}{0} 73 75 \setcounter{paragraph}{0}
\setcounter{subparagraph}{0} 74 76 \setcounter{subparagraph}{0}
\setcounter{figure}{3} 75 77 \setcounter{figure}{3}
\setcounter{table}{1} 76 78 \setcounter{table}{1}
\setcounter{caption@flags}{2} 77 79 \setcounter{caption@flags}{2}
\setcounter{continuedfloat}{0} 78 80 \setcounter{continuedfloat}{0}
\setcounter{subfigure}{0} 79 81 \setcounter{subfigure}{0}
\setcounter{subtable}{0} 80 82 \setcounter{subtable}{0}
\setcounter{parentequation}{0} 81 83 \setcounter{parentequation}{0}
\setcounter{thmt@dummyctr}{0} 82 84 \setcounter{thmt@dummyctr}{0}
\setcounter{vrcnt}{0} 83 85 \setcounter{vrcnt}{0}
\setcounter{upm@subfigure@count}{0} 84 86 \setcounter{upm@subfigure@count}{0}
\setcounter{upm@fmt@mtabular@columnnumber}{0} 85 87 \setcounter{upm@fmt@mtabular@columnnumber}{0}
\setcounter{upm@format@section@sectionlevel}{1} 86 88 \setcounter{upm@format@section@sectionlevel}{1}
\setcounter{upm@fmt@savedcounter}{0} 87 89 \setcounter{upm@fmt@savedcounter}{0}
\setcounter{@@upm@fmt@inlineenumeration}{0} 88 90 \setcounter{@@upm@fmt@inlineenumeration}{0}
\setcounter{@upm@fmt@enumdescription@cnt@}{0} 89 91 \setcounter{@upm@fmt@enumdescription@cnt@}{0}
\setcounter{upmdefinition}{0} 90 92 \setcounter{upmdefinition}{0}
\setcounter{section@level}{1} 91 93 \setcounter{section@level}{1}
\setcounter{Item}{0} 92 94 \setcounter{Item}{0}
\setcounter{Hfootnote}{0} 93 95 \setcounter{Hfootnote}{0}
\setcounter{bookmark@seq@number}{31} 94 96 \setcounter{bookmark@seq@number}{31}
\setcounter{DefaultLines}{2} 95 97 \setcounter{DefaultLines}{2}
\setcounter{DefaultDepth}{0} 96 98 \setcounter{DefaultDepth}{0}
\setcounter{L@lines}{3} 97 99 \setcounter{L@lines}{3}
\setcounter{L@depth}{0} 98 100 \setcounter{L@depth}{0}
\setcounter{float@type}{8} 99 101 \setcounter{float@type}{8}
\setcounter{algorithm}{0} 100 102 \setcounter{algorithm}{0}
\setcounter{ALG@line}{0} 101 103 \setcounter{ALG@line}{0}
\setcounter{ALG@rem}{0} 102 104 \setcounter{ALG@rem}{0}
\setcounter{ALG@nested}{0} 103 105 \setcounter{ALG@nested}{0}
\setcounter{ALG@Lnr}{2} 104 106 \setcounter{ALG@Lnr}{2}
\setcounter{ALG@blocknr}{10} 105 107 \setcounter{ALG@blocknr}{10}
\setcounter{ALG@storecount}{0} 106 108 \setcounter{ALG@storecount}{0}
\setcounter{ALG@tmpcounter}{0} 107 109 \setcounter{ALG@tmpcounter}{0}
} 108 110 }
chapters/CBR.tex View file @ 399d77d
\chapter{\'Etat de l'art (Raisonnement à Partir de Cas)} 1 1 \chapter{\'Etat de l'art (Raisonnement à Partir de Cas)}
2 2
\section{Raisonnement à partir de cas (RàPC)} 3 3 \section{Raisonnement à partir de cas (RàPC)}
4 4
Le raisonnement à partir de cas est une approche fondée sur la connaissance. Il s'agit d'une technique d'intelligence artificielle dont l'idée est de résoudre un nouveau problème grâce aux connaissances déjà acquises par le système et en tenant un raisonnement fondé sur l'analogie. Le RàPC est apparu comme une alternative pour améliorer les systèmes experts. Schank et Abelson \cite{schank+abelson77} ont initialement mené des travaux sur l'organisation hiérarchique de la mémoire pour imiter le raisonnement humain. Ceux-ci ont servi de fondement aux travaux de Janet Kolodner \cite{KOLODNER1983281} et ont abouti à l'implémentation d'un système fondé sur ces principes en 1983. Le terme \textit{raisonnement à partir de cas} est utilisé pour la première fois en 1989 par Riesbeck et Schank \cite{Riesbeck1989}. 5 5 Le raisonnement à partir de cas est une approche fondée sur la connaissance. Il s'agit d'une technique d'intelligence artificielle dont l'idée est de résoudre un nouveau problème grâce aux connaissances déjà acquises par le système et en tenant un raisonnement fondé sur l'analogie. Le RàPC est apparu comme une alternative pour améliorer les systèmes experts. Schank et Abelson \cite{schank+abelson77} ont initialement mené des travaux sur l'organisation hiérarchique de la mémoire pour imiter le raisonnement humain. Ceux-ci ont servi de fondement aux travaux de Janet Kolodner \cite{KOLODNER1983281} et ont abouti à l'implémentation d'un système fondé sur ces principes en 1983. Le terme \textit{raisonnement à partir de cas} est utilisé pour la première fois en 1989 par Riesbeck et Schank \cite{Riesbeck1989}.
6 6
Comme vu dans le chapitre du contexte, le raisonnement à partir de cas utilise quatre conteneurs de connaissance pour représenter la connaissance complète du système, chaque conteneur stocke l'information associée à une fonction spécifique et tout est fondé sur le carré d'analogie, c'est-à-dire des solutions ayant permis de résoudre un problème ancien sont réutilisées afin de résoudre un problème nouveau similaire. La plupart des systèmes de RàPC utilisent pour son fonctionnement comme base un cycle composé de quatre étapes (retrouver, réutiliser, réviser et retenir). 7 7 Comme vu dans le chapitre du contexte, le raisonnement à partir de cas utilise quatre conteneurs de connaissance pour représenter la connaissance complète du système, chaque conteneur stocke l'information associée à une fonction spécifique et tout est fondé sur le carré d'analogie, c'est-à-dire des solutions ayant permis de résoudre un problème ancien sont réutilisées afin de résoudre un problème nouveau similaire. La plupart des systèmes de RàPC utilisent pour son fonctionnement comme base un cycle composé de quatre étapes (retrouver, réutiliser, réviser et retenir).
8 8
Les travaux ici cités sont des travaux représentatifs parce qu'ils ont permis de trouver certains points d'amélioration du RàPC, ont donné des pistes d'intégration avec d'autres techniques ou ont donné des idées de modification pour obtenir de bons résultats en gagnant de la performance. 9 9 Les travaux ici cités sont des travaux représentatifs parce qu'ils ont permis de trouver certains points d'amélioration du RàPC, ont donné des pistes d'intégration avec d'autres techniques ou ont donné des idées de modification pour obtenir de bons résultats en gagnant de la performance.
10 10
---RàPC et réseaux de neurones 11 11 ---RàPC et réseaux de neurones
12 12
Plusieurs travaux combinent le RàPC avec les réseaux de neurones (Deep Learning) avec l'objectif d'améliorer les réponses générées et optimiser les performances de chaque algorithme. C'est une idée qui marche dans certains cas, mais qui n'est pas très récente comme par exemple dans \cite{JUNG20095695} les auteurs développent un système fondé sur l'hybridation du RàPC avec des réseaux de neurones pour concevoir des produits. Le système se focalise uniquement dans les phases "rechercher" et "réutiliser" du RàPC dans lesquelles sont exécutés les algorithmes implémentés. Le système détermine de façon automatique les valeurs pour les paramètres nécessaires à la conception d'un produit particulier en suivant le cycle traditionnel du RàPC. Avec l'algorithme de k-moyennes, est extrait un cas représentatif de la base de cas et l'adaptation des solutions des voisins trouvées est faite avec le réseau de neurones RBFN (Radial Basis Function Network). L'évaluation du système est faite en utilisant une base de données contenant 830 tests, les résultats démontrent que les produits conçus s'ajustent avec un grand pourcentage aux normes définies. 13 13 Plusieurs travaux combinent le RàPC avec les réseaux de neurones (Deep Learning) avec l'objectif d'améliorer les réponses générées et optimiser les performances de chaque algorithme. C'est une idée qui marche dans certains cas, mais qui n'est pas très récente comme par exemple dans \cite{JUNG20095695} les auteurs développent un système fondé sur l'hybridation du RàPC avec des réseaux de neurones pour concevoir des produits. Le système se focalise uniquement dans les phases "rechercher" et "réutiliser" du RàPC dans lesquelles sont exécutés les algorithmes implémentés. Le système détermine de façon automatique les valeurs pour les paramètres nécessaires à la conception d'un produit particulier en suivant le cycle traditionnel du RàPC. 
Avec l'algorithme de k-moyennes, est extrait un cas représentatif de la base de cas et l'adaptation des solutions des voisins trouvées est faite avec le réseau de neurones RBFN (Radial Basis Function Network). L'évaluation du système est faite en utilisant une base de données contenant 830 tests, les résultats démontrent que les produits conçus s'ajustent avec un grand pourcentage aux normes définies.
14 14
Dans \cite{10.1007/978-3-642-15973-2_50} un réseau de neurones classique est implémenté pour définir la géométrie d'une matrice pour l'extrusion de l'aluminium. En effet, actuellement c'est un processus qui se fait manuellement et par essai et erreur. Le RàPC est alors utilisé pour aider à déterminer les valeurs optimales des paramètres du réseau, en utilisant l'information des matrices d'extrusion déjà testées. 15 15 Dans \cite{10.1007/978-3-642-15973-2_50} un réseau de neurones classique est implémenté pour définir la géométrie d'une matrice pour l'extrusion de l'aluminium. En effet, actuellement c'est un processus qui se fait manuellement et par essai et erreur. Le RàPC est alors utilisé pour aider à déterminer les valeurs optimales des paramètres du réseau, en utilisant l'information des matrices d'extrusion déjà testées.
16 16
Aussi le travail de \cite{PETROVIC201617} où les auteurs proposent un système de raisonnement à partir de cas pour calculer la dose optimale de radiation pour le traitement du cancer. Dans ce domaine particulier de la radiothérapie, administrer une dose nécessite de connaître avec précision le nombre de faisceaux et l'angle de chacun d'eux. L'algorithme proposé tente de trouver la combinaison de valeurs optimales pour ces deux paramètres en utilisant les réseaux de neurones. L'utilisation des réseaux de neurones intervient lors de l'adaptation des cas connus : ils modifient le nombre et les angles des faisceaux. La validation de l'algorithme est évaluée avec une base de 80 cas réels de cancer du cerveau extraits de l'hôpital de Nottingham City. Le nombre de neurones et de couches ont été définis de façon empirique. Les résultats montrent que l'utilisation des cas historiques et la construction des solutions à partir des solutions déjà connues permet une amélioration de 12\% concernant la décision du nombre de faisceaux et de 29\% concernant la décision liée à leur angle. 17 17 Aussi le travail de \cite{PETROVIC201617} où les auteurs proposent un système de raisonnement à partir de cas pour calculer la dose optimale de radiation pour le traitement du cancer. Dans ce domaine particulier de la radiothérapie, administrer une dose nécessite de connaître avec précision le nombre de faisceaux et l'angle de chacun d'eux. L'algorithme proposé tente de trouver la combinaison de valeurs optimales pour ces deux paramètres en utilisant les réseaux de neurones. L'utilisation des réseaux de neurones intervient lors de l'adaptation des cas connus : ils modifient le nombre et les angles des faisceaux. La validation de l'algorithme est évaluée avec une base de 80 cas réels de cancer du cerveau extraits de l'hôpital de Nottingham City. Le nombre de neurones et de couches ont été définis de façon empirique. 
Les résultats montrent que l'utilisation des cas historiques et la construction des solutions à partir des solutions déjà connues permet une amélioration de 12\% concernant la décision du nombre de faisceaux et de 29\% concernant la décision liée à leur angle.
18 18
Plus récemment se trouvent des travaux comme \cite{wolf2024keep} où le RàPC est employé comme technique pour expliquer les résultats générés par un réseau qui classifie les images en fonction de certains attributs et zones dans les images, les résultats permettent de conclure que les explications trouvées avec RàPC sont très fidèles aux images testées. Aussi \cite{PAREJASLLANOVARCED2024111469} utilisent le RàPC et ses avantages pour le coupler à un réseau profond (Deep Learning) pour sélectionner la meilleure explication au résultat donné par le classificateur d'images dans ce cas, la base de connaissance est constituée de certaines images avec des étiquettes qui contiennent l'information associée à l'image, la comparaison réalisée avec les algorithmes représentatifs de l'état de l'art suggère que la combinaison Deep Learning marche bien dans ce type de problèmes. 19 19 Plus récemment se trouvent des travaux comme \cite{wolf2024keep} où le RàPC est employé comme technique pour expliquer les résultats générés par un réseau qui classifie les images en fonction de certains attributs et zones dans les images, les résultats permettent de conclure que les explications trouvées avec RàPC sont très fidèles aux images testées. Aussi \cite{PAREJASLLANOVARCED2024111469} utilisent le RàPC et ses avantages pour le coupler à un réseau profond (Deep Learning) pour sélectionner la meilleure explication au résultat donné par le classificateur d'images dans ce cas, la base de connaissance est constituée de certaines images avec des étiquettes qui contiennent l'information associée à l'image, la comparaison réalisée avec les algorithmes représentatifs de l'état de l'art suggère que la combinaison Deep Learning marche bien dans ce type de problèmes.
20 20
--- Modifications du cycle fondamental 21 21 --- Modifications du cycle fondamental
22 22
Certains travaux ont appliqué le raisonnement à partir de cas à un problème spécifique en proposant des représentations des cas et des solutions différentes, d'autres ont modifié le cycle conceptuel comme le montre la figure \figref{figMCBR2} extraite de \cite{10.1007/978-3-319-47096-2_11}. Dans cette proposition, un cycle complémentaire incluant un outil d'apprentissage profond (\textit{Deep Learning}) est ajouté pour améliorer le résultat du processus du RàPC. Dans \cite{10.1007/978-3-642-15973-2_50}, la phase de stockage est modifiée en retenant les cas dont les résultats n'ont pas eu de succès, pour guider le processus dans la fabrications de nouvelles pièces. Enfin, dans \cite{Robertson2014ARO}, les auteurs proposent d'ajouter à chaque cas une valeur d'utilité espérée selon chaque action possible. Cet ajout s'accompagne d'une prédiction probabiliste des actions que l'application engendrera en réponse. Cette prédiction probabiliste dépend bien entendu de l'état initial du système avant mise en oeuvre de l'action. 23 23 Certains travaux ont appliqué le raisonnement à partir de cas à un problème spécifique en proposant des représentations des cas et des solutions différentes, d'autres ont modifié le cycle conceptuel comme le montre la figure \figref{figMCBR2} extraite de \cite{10.1007/978-3-319-47096-2_11}. Dans cette proposition, un cycle complémentaire incluant un outil d'apprentissage profond (\textit{Deep Learning}) est ajouté pour améliorer le résultat du processus du RàPC. Dans \cite{10.1007/978-3-642-15973-2_50}, la phase de stockage est modifiée en retenant les cas dont les résultats n'ont pas eu de succès, pour guider le processus dans la fabrications de nouvelles pièces. Enfin, dans \cite{Robertson2014ARO}, les auteurs proposent d'ajouter à chaque cas une valeur d'utilité espérée selon chaque action possible. Cet ajout s'accompagne d'une prédiction probabiliste des actions que l'application engendrera en réponse. 
Cette prédiction probabiliste dépend bien entendu de l'état initial du système avant mise en oeuvre de l'action.
24 24
Plusieurs travaux appliquent le RàPC avec succès en proposant des modifications dans chacune des phases ou en combinant différents algorithmes. Certains systèmes de RàPC appliqués au domaine de la conception de produits sont remarquables à ce titre. Dans \cite{ROLDANREYES20151} les auteurs proposent, comme le montre la figure \figref{figMCBR1}, un algorithme pour produire le propylène glycol dans un réacteur chimique. Dans ce cas, la phase de réutilisation du RàPC est couplée à la recherche des états qui satisfassent le nouveau problème (\textit{Constraint satisfaction problems CSP}) en utilisant l'information des cas de base déjà résolus. Les solutions trouvées sont évaluées selon le nombre de changements réalisés sur les solutions déjà connues (parcimonie), le nombre de solutions possibles trouvées (précision), l'évaluation de commentaires faits par des experts et la complexité des transformations réalisées. 25 25 Plusieurs travaux appliquent le RàPC avec succès en proposant des modifications dans chacune des phases ou en combinant différents algorithmes. Certains systèmes de RàPC appliqués au domaine de la conception de produits sont remarquables à ce titre. Dans \cite{ROLDANREYES20151} les auteurs proposent, comme le montre la figure \figref{figMCBR1}, un algorithme pour produire le propylène glycol dans un réacteur chimique. Dans ce cas, la phase de réutilisation du RàPC est couplée à la recherche des états qui satisfassent le nouveau problème (\textit{Constraint satisfaction problems CSP}) en utilisant l'information des cas de base déjà résolus. Les solutions trouvées sont évaluées selon le nombre de changements réalisés sur les solutions déjà connues (parcimonie), le nombre de solutions possibles trouvées (précision), l'évaluation de commentaires faits par des experts et la complexité des transformations réalisées.
26 26
Dans \cite{10.1007/978-3-319-47096-2_11}, les auteurs ajoutent un nouveau cycle au cycle traditionnel du RàPC. Le premier cycle génère des descriptions abstraites du problème avec un réseau de neurones et des algorithmes génétiques; le second cycle prend les descriptions abstraites comme des nouveaux cas, cherche les cas similaires et adapte les solutions rencontrées. L'exécution des deux cycles prend en compte certains critères prédéfinis par l'utilisateur. En comparant le même problème avec le cycle traditionnel du RàPC, les auteurs mesurent une amélioration de la qualité des recettes proposées et montrent que celles-ci sont plus en accord avec les critères définis. 27 27 Dans \cite{10.1007/978-3-319-47096-2_11}, les auteurs ajoutent un nouveau cycle au cycle traditionnel du RàPC. Le premier cycle génère des descriptions abstraites du problème avec un réseau de neurones et des algorithmes génétiques; le second cycle prend les descriptions abstraites comme des nouveaux cas, cherche les cas similaires et adapte les solutions rencontrées. L'exécution des deux cycles prend en compte certains critères prédéfinis par l'utilisateur. En comparant le même problème avec le cycle traditionnel du RàPC, les auteurs mesurent une amélioration de la qualité des recettes proposées et montrent que celles-ci sont plus en accord avec les critères définis.
28 28
\cite{10.1007/978-3-319-61030-6_1} s'intéressent quant à eux à la génération de recettes de cuisine originales. Les auteurs modifient le cycle traditionnel du RàPC et créent deux cycles, combinant l'apprentissage profond et la récupération de cas basée sur la similarité, le premier cycle génère des descriptions abstraites de la conception avec des algorithmes génétiques et un réseau neuronal profond, le second cycle utilise les descriptions abstraites pour récupérer et adapter des objets, cette structure donne lieu à un prototype appelé Q-chef qui génère une recette basée sur la base de données d'ingrédients et les demandes de l'utilisateur. Ce travail ne montre pas de métriques standard génériques mais utilise deux nombres indicatifs (plausibilité et surprise) pour démontrer la génération efficace de nouvelles recettes selon les critères de l'utilisateur en comparant le RàPC à deux cycles avec le RàPC à un cycle, démontrant plus de plausibilité et de surprise dans les recettes générées. 29 29 \cite{10.1007/978-3-319-61030-6_1} s'intéressent quant à eux à la génération de recettes de cuisine originales. Les auteurs modifient le cycle traditionnel du RàPC et créent deux cycles, combinant l'apprentissage profond et la récupération de cas basée sur la similarité, le premier cycle génère des descriptions abstraites de la conception avec des algorithmes génétiques et un réseau neuronal profond, le second cycle utilise les descriptions abstraites pour récupérer et adapter des objets, cette structure donne lieu à un prototype appelé Q-chef qui génère une recette basée sur la base de données d'ingrédients et les demandes de l'utilisateur. 
Ce travail ne montre pas de métriques standard génériques mais utilise deux nombres indicatifs (plausibilité et surprise) pour démontrer la génération efficace de nouvelles recettes selon les critères de l'utilisateur en comparant le RàPC à deux cycles avec le RàPC à un cycle, démontrant plus de plausibilité et de surprise dans les recettes générées.
30 30
--- Techniques de génération / transformation de solutions 31 31 --- Techniques de génération / transformation de solutions
32 32
La représentation des cas peut permettre également d'améliorer les résultats d'un système de RàPC. La performance d'un système de RàPC dépend de la quantité d'informations stockées, mais également des algorithmes implémentés. C'est le cas dans \cite{Muller} où les recettes sont codées comme un processus de transformation et mélangent des ingrédients en suivant une suite d'étapes ordonnés. Pour créer des recettes innovantes, une mesure de distance est utilisée entre les ingrédients. Cette mesure permet de trouver une recette en substituant certains ingrédients par d'autres, similaires ou aux qualités et/ou caractéristiques similaires. De la même manière, il est possible de créer des recettes plus proches des exigences des utilisateurs. Les étapes de transformation appelées aussi opérateurs sont stockées et catégorisées grâce à une métrique permettant de les échanger afin d'obtenir une nouvelle recette. 33 33 La représentation des cas peut permettre également d'améliorer les résultats d'un système de RàPC. La performance d'un système de RàPC dépend de la quantité d'informations stockées, mais également des algorithmes implémentés. C'est le cas dans \cite{Muller} où les recettes sont codées comme un processus de transformation et mélangent des ingrédients en suivant une suite d'étapes ordonnés. Pour créer des recettes innovantes, une mesure de distance est utilisée entre les ingrédients. Cette mesure permet de trouver une recette en substituant certains ingrédients par d'autres, similaires ou aux qualités et/ou caractéristiques similaires. De la même manière, il est possible de créer des recettes plus proches des exigences des utilisateurs. Les étapes de transformation appelées aussi opérateurs sont stockées et catégorisées grâce à une métrique permettant de les échanger afin d'obtenir une nouvelle recette.
34 34
La génération, analyse et correction de texte constitue également un domaine d'application intéressant du RàPC. Pour la réalisation de ces tâches il est parfois nécessaire de transformer le texte en représentation numérique ou de mesurer la proximité sémantique de mots. Le travail de \cite{10.1007/978-3-319-24586-7_20} utilise le RàPC pour générer des histoires en utilisant le texte d'autres histoires. Le système décrit explore les transformations possibles afin que la nouvelle histoire ne soit pas très similaire aux histoires déjà connues mais que elle soit cohérente. Le travail est plus focalisé sur la phase de révision, car les résultats de celle-ci déterminent si l'histoire générée correspond aux critères spécifiés ou si le cycle d'adaptation doit recommencer; l'adaptation se fait en recherchant les personnages, les contextes, les objets dans la base de cas, et en les unifiant avec des actions; la plupart des histoires générées sont cohérentes mais elles sont constituées de paragraphes très courts. 35 35 La génération, analyse et correction de texte constitue également un domaine d'application intéressant du RàPC. Pour la réalisation de ces tâches il est parfois nécessaire de transformer le texte en représentation numérique ou de mesurer la proximité sémantique de mots. Le travail de \cite{10.1007/978-3-319-24586-7_20} utilise le RàPC pour générer des histoires en utilisant le texte d'autres histoires. Le système décrit explore les transformations possibles afin que la nouvelle histoire ne soit pas très similaire aux histoires déjà connues mais que elle soit cohérente. 
Le travail est plus focalisé sur la phase de révision, car les résultats de celle-ci déterminent si l'histoire générée correspond aux critères spécifiés ou si le cycle d'adaptation doit recommencer; l'adaptation se fait en recherchant les personnages, les contextes, les objets dans la base de cas, et en les unifiant avec des actions; la plupart des histoires générées sont cohérentes mais elles sont constituées de paragraphes très courts.
36 36
Dans \cite{10.1007/978-3-030-58342-2_20} les phrases écrites en langue française sont corrigées. Ce travail n'utilise ni la transformation numérique des phrases, ni de connaissances linguistiques, mais retrouve les phrases similaires en utilisant l'algorithme LCS (\textit{Longest Common Subsequence}) et en calculant une mesure de distance avec les phrases correctes et incorrectes de la base de connaissances. Si les phrases similaires sont incorrectes, le système peut proposer une correction en changeant certains mots selon le contexte et recalculer les distances afin de mesurer la cohérence et pertinence de la phrase proposée. 37 37 Dans \cite{10.1007/978-3-030-58342-2_20} les phrases écrites en langue française sont corrigées. Ce travail n'utilise ni la transformation numérique des phrases, ni de connaissances linguistiques, mais retrouve les phrases similaires en utilisant l'algorithme LCS (\textit{Longest Common Subsequence}) et en calculant une mesure de distance avec les phrases correctes et incorrectes de la base de connaissances. Si les phrases similaires sont incorrectes, le système peut proposer une correction en changeant certains mots selon le contexte et recalculer les distances afin de mesurer la cohérence et pertinence de la phrase proposée.
38 38
--- Prédiction avec RàPC 39 39 --- Prédiction avec RàPC
40 40
L'objectif du travail de \cite{10.1007/978-3-030-01081-2_25} est la prédiction du temps de course pour un athlète. Un KNN recherche les cas similaires. Le système interpole un temps final grâce au calcul d'une moyenne pondérée des meilleurs temps des cas similaires retrouvés dans la première étape du RàPC. 41 41 L'objectif du travail de \cite{10.1007/978-3-030-01081-2_25} est la prédiction du temps de course pour un athlète. Un KNN recherche les cas similaires. Le système interpole un temps final grâce au calcul d'une moyenne pondérée des meilleurs temps des cas similaires retrouvés dans la première étape du RàPC.
42 42
Dans \cite{10.1007/978-3-030-58342-2_8}, les auteurs essaient de prédire le meilleur temps d'un patineur par analogie avec ceux des patineurs ayant des caractéristiques et une histoire de course similaires. Cependant parfois, calculer une moyenne des temps similaires trouvés ne suffit pas. Certaines caractéristiques liées au contexte, à l'environnement et à la nature de la course (le type de course, le type de piste, la distance à parcourir, etc.), peuvent en effet influencer de manière importante la performance du patineur. L'algorithme a été testé avec une base de données contenant les informations de 21 courses de 500m, 700m, 1000m, 1500m, 3km, 5km et 10km réalisées entre septembre 2015 et janvier 2020.
44 44
Un système multi-fonctionnel est décrit dans \cite{10.1007/978-3-030-58342-2_5}. Celui-ci permet d’obtenir une prédiction du temps de course, de suggérer un plan du rythme de la course et il recommande également un plan d'entraînement pour une course donnée. Les trois fonctionnalités sont implémentées en utilisant le RàPC. Les calculs de la similarité sont fondés sur un historique et des caractéristiques physiques des coureurs. Les plans d'entraînement sont génériques et sont proposés sur les 16 semaines précédant le début du marathon ciblé (selon les auteurs, c'est en effet le temps usuel pour une préparation à ce type d'épreuve). Le système a été évalué avec une base de données constituée de caractéristiques de 21000 coureurs des marathons de Dublin, Londres ou New-York pour la période de 2014 à 2017. 45 45 Un système multi-fonctionnel est décrit dans \cite{10.1007/978-3-030-58342-2_5}. Celui-ci permet d’obtenir une prédiction du temps de course, de suggérer un plan du rythme de la course et il recommande également un plan d'entraînement pour une course donnée. Les trois fonctionnalités sont implémentées en utilisant le RàPC. Les calculs de la similarité sont fondés sur un historique et des caractéristiques physiques des coureurs. Les plans d'entraînement sont génériques et sont proposés sur les 16 semaines précédant le début du marathon ciblé (selon les auteurs, c'est en effet le temps usuel pour une préparation à ce type d'épreuve). Le système a été évalué avec une base de données constituée de caractéristiques de 21000 coureurs des marathons de Dublin, Londres ou New-York pour la période de 2014 à 2017.
46 46
--- Systèmes de recommandation 47 47 --- Systèmes de recommandation
48 48
Les systèmes de recommandation et le RàPC peuvent aussi être combinés. Le système proposé par \cite{8495930} montre ainsi les bénéfices de l'utilisation du RàPC dans les environnements informatiques pour l'apprentissage humain (EIAH). Le modèle proposé suit le cycle traditionnel du RàPC en combinant les modèles d'apprentissages traditionnels et numériques. Les principales contributions sont la représentation des cas et la recommandation de parcours d'apprentissage personnalisés selon les informations issues des autres apprenants. Une base de cas initiaux a été créée pour mesurer l'efficacité du modèle. Celle-ci stocke la recommandation du parcours de 120 apprenants. Des examens réalisés avant et après avoir suivi le parcours recommandé par le système permettent de mesurer l'efficacité de la recommandation proposée.
50 50
Le système décrit dans l'étude de \cite{Obeid} présente la particularité d'être capable d'analyser des données hétérogènes et multidimensionnelles. Dans ce travail, un parcours de carrière et des universités/collèges sont recommandés aux élèves du secondaire en fonction de leurs intérêts. Ce travail montre également une taxonomie des techniques algorithmiques généralement utilisées dans les systèmes de recommandation pour les EIAH (figure \figref{figTax}).
52 52
--- Méthodes d'ensemble 53 53 --- Méthodes d'ensemble
54 54
\cite{buildings13030651} mettent en œuvre un RàPC avec une méthode d'agrégation bootstrap (\textit{bagging}) pour en améliorer la précision et en réduire la variance.
56 56
Un modèle d'ensemble fondé sur le raisonnement à partir de cas est proposé par \cite{YU2023110163}, appliqué à la prédiction financière et au remplissage des données manquantes. Dans ce cas, pour retrouver les plus proches voisins, le modèle utilise trois mesures de distance différentes et une étape de vote pour l'intégration. Le modèle a été testé avec une base de données comportant onze dimensions d'informations financières provenant de 249 entreprises. La comparaison est faite avec deux objectifs. Premièrement, le remplissage des données manquantes avec d'autres algorithmes tels que KNN ou RandomForest, et deuxièmement, la comparaison de la prédiction avec des algorithmes uniques utilisant une métrique de distance spécifique. En effet, les résultats montrent une meilleure performance dans le remplissage des données manquantes et les meilleurs résultats dans la prédiction.
58 58
---------------------------------- 59 59 ----------------------------------
60 60
\colorbox{yellow}{Est-ce parce qu'il couple RàPC et RNA ? Ce n'est pas le seul. Pourquoi lui ?}\\ 61 61 \colorbox{yellow}{Est-ce parce qu'il couple RàPC et RNA ? Ce n'est pas le seul. Pourquoi lui ?}\\
\colorbox{yellow}{Est-ce le premier ?}\\ 62 62 \colorbox{yellow}{Est-ce le premier ?}\\
63 63
\colorbox{yellow}{En quoi est-il différent de Petrovic et al. 2016}\\ 64 64 \colorbox{yellow}{En quoi est-il différent de Petrovic et al. 2016}\\
65 65
\mfigure[!ht]{width=\textwidth}{./Figures/ModCBR1.png}{Cycle du RàPC modifié. (Traduit de \cite{ROLDANREYES20151})}{figMCBR1} 66 66 \mfigure[!ht]{width=\textwidth}{./Figures/ModCBR1.png}{Cycle du RàPC modifié. (Traduit de \cite{ROLDANREYES20151})}{figMCBR1}
67 67
L'utilisation du RàPC pour la création de recettes de cuisine à partir d'une base d'ingrédients et d'un ensemble de recettes déjà connues a été un cas d'application que les chercheurs du domaine ont trouvé intéressant à explorer. Malgré l'apparente simplicité de ce problème, celui-ci a permis de tester différentes approches et de concevoir différents modèles. 68 68 L'utilisation du RàPC pour la création de recettes de cuisine à partir d'une base d'ingrédients et d'un ensemble de recettes déjà connues a été un cas d'application que les chercheurs du domaine ont trouvé intéressant à explorer. Malgré l'apparente simplicité de ce problème, celui-ci a permis de tester différentes approches et de concevoir différents modèles.
69 69
%Le RàPC a également été mis en oeuvre dans différents travaux liés à la génération, l’analyse et la correction de textes écrits. Pour la réalisation de ces tâches, il est parfois nécessaire de transformer le texte en représentation numérique ou d’établir une fonction ou mesure qui va indiquer quels mots ont une proximité sémantique. Le travail de \cite{10.1007/978-3-319-24586-7_20} utilise le RàPC pour générer des histoires en utilisant le texte d'autres histoires. Le système décrit dans ce travail explore les transformations de forme possibles afin que la nouvelle histoire ne soit pas similaire aux histoires déjà connues tout en étant cohérente. Le travail est plus focalisé sur la phase de révision, car les résultats de la révision déterminent si l’histoire générée correspond aux critères spécifiés ou si le cycle d'adaptation doit être recommencé. L'adaptation se fait en cherchant dans la base des cas, les personnages, les contextes, les objets et en \colorbox{pink}{unifiant tout avec des actions ??}. Les résultats montrent que la plupart des histoires générées sont cohérentes mais qu'elles sont constituées de paragraphes très courts. Dans \cite{10.1007/978-3-030-58342-2_20}, les phrases écrites en langue française sont corrigées, ce travail n'utilise ni la transformation numérique \colorbox{yellow}{?} des phrases, ni des connaissances linguistiques. Le système conçu retrouve des phrases similaires en utilisant l'algorithme LCS (\textit{Longest Common Subsequence}) et en calculant la distance entre toutes les phrases \colorbox{yellow}{lesquelles ?} pour savoir \colorbox{pink}{je ne comprends pas la suite }si elle est bien écrite ou pas. Si la phrase n’est pas bien écrite, le système peut proposer une correction en changeant certains mots selon le contexte et calculer à nouveau les distances mesurant la cohérence et pertinence de la phrase. 
70 70 %Le RàPC a également été mis en oeuvre dans différents travaux liés à la génération, l’analyse et la correction de textes écrits. Pour la réalisation de ces tâches, il est parfois nécessaire de transformer le texte en représentation numérique ou d’établir une fonction ou mesure qui va indiquer quels mots ont une proximité sémantique. Le travail de \cite{10.1007/978-3-319-24586-7_20} utilise le RàPC pour générer des histoires en utilisant le texte d'autres histoires. Le système décrit dans ce travail explore les transformations de forme possibles afin que la nouvelle histoire ne soit pas similaire aux histoires déjà connues tout en étant cohérente. Le travail est plus focalisé sur la phase de révision, car les résultats de la révision déterminent si l’histoire générée correspond aux critères spécifiés ou si le cycle d'adaptation doit être recommencé. L'adaptation se fait en cherchant dans la base des cas, les personnages, les contextes, les objets et en \colorbox{pink}{unifiant tout avec des actions ??}. Les résultats montrent que la plupart des histoires générées sont cohérentes mais qu'elles sont constituées de paragraphes très courts. Dans \cite{10.1007/978-3-030-58342-2_20}, les phrases écrites en langue française sont corrigées, ce travail n'utilise ni la transformation numérique \colorbox{yellow}{?} des phrases, ni des connaissances linguistiques. Le système conçu retrouve des phrases similaires en utilisant l'algorithme LCS (\textit{Longest Common Subsequence}) et en calculant la distance entre toutes les phrases \colorbox{yellow}{lesquelles ?} pour savoir \colorbox{pink}{je ne comprends pas la suite }si elle est bien écrite ou pas. Si la phrase n’est pas bien écrite, le système peut proposer une correction en changeant certains mots selon le contexte et calculer à nouveau les distances mesurant la cohérence et pertinence de la phrase.
71 71
\mfigure[!ht]{width=\textwidth}{./Figures/taxonomieEIAH.png}{Taxonomie des techniques algorithmiques employées pour des modules de recommandation dans les EIAH (Traduit de \cite{Obeid})}{figTax} 72 72 \mfigure[!ht]{width=\textwidth}{./Figures/taxonomieEIAH.png}{Taxonomie des techniques algorithmiques employées pour des modules de recommandation dans les EIAH (Traduit de \cite{Obeid})}{figTax}
73 73
%\begin{figure} 74 74 %\begin{figure}
%\centering 75 75 %\centering
%\includegraphics[scale=21]{./Figures/taxonomieEIAH.png} 76 76 %\includegraphics[scale=21]{./Figures/taxonomieEIAH.png}
\relax 1 1 \relax
\providecommand\babel@aux[2]{} 2 2 \providecommand\babel@aux[2]{}
\@nameuse{bbl@beforestart} 3 3 \@nameuse{bbl@beforestart}
\catcode `:\active 4 4 \catcode `:\active
\catcode `;\active 5 5 \catcode `;\active
\catcode `!\active 6 6 \catcode `!\active
\catcode `?\active 7 7 \catcode `?\active
\providecommand\hyper@newdestlabel[2]{} 8 8 \providecommand\hyper@newdestlabel[2]{}
\providecommand\HyperFirstAtBeginDocument{\AtBeginDocument} 9 9 \providecommand\HyperFirstAtBeginDocument{\AtBeginDocument}
\HyperFirstAtBeginDocument{\ifx\hyper@anchor\@undefined 10 10 \HyperFirstAtBeginDocument{\ifx\hyper@anchor\@undefined
\global\let\oldnewlabel\newlabel 11 11 \global\let\oldnewlabel\newlabel
\gdef\newlabel#1#2{\newlabelxx{#1}#2} 12 12 \gdef\newlabel#1#2{\newlabelxx{#1}#2}
\gdef\newlabelxx#1#2#3#4#5#6{\oldnewlabel{#1}{{#2}{#3}}} 13 13 \gdef\newlabelxx#1#2#3#4#5#6{\oldnewlabel{#1}{{#2}{#3}}}
\AtEndDocument{\ifx\hyper@anchor\@undefined 14 14 \AtEndDocument{\ifx\hyper@anchor\@undefined
\let\newlabel\oldnewlabel 15 15 \let\newlabel\oldnewlabel
\fi} 16 16 \fi}
\fi} 17 17 \fi}
\global\let\hyper@last\relax 18 18 \global\let\hyper@last\relax
\gdef\HyperFirstAtBeginDocument#1{#1} 19 19 \gdef\HyperFirstAtBeginDocument#1{#1}
\providecommand\HyField@AuxAddToFields[1]{} 20 20 \providecommand\HyField@AuxAddToFields[1]{}
\providecommand\HyField@AuxAddToCoFields[2]{} 21 21 \providecommand\HyField@AuxAddToCoFields[2]{}
\providecommand \oddpage@label [2]{} 22 22 \providecommand \oddpage@label [2]{}
\babel@aux{french}{} 23 23 \babel@aux{french}{}
\@writefile{toc}{\contentsline {part}{I\hspace {1em}Contexte et Problématiques}{1}{part.1}\protected@file@percent } 24 24 \@writefile{toc}{\contentsline {part}{I\hspace {1em}Contexte et Problématiques}{1}{part.1}\protected@file@percent }
\citation{Nkambou} 25 25 \citation{Nkambou}
\citation{doi:10.1177/1754337116651013} 26 26 \citation{doi:10.1177/1754337116651013}
\@writefile{toc}{\contentsline {chapter}{\numberline {1}Introduction}{3}{chapter.1}\protected@file@percent } 27 27 \@writefile{toc}{\contentsline {chapter}{\numberline {1}Introduction}{3}{chapter.1}\protected@file@percent }
\@writefile{lof}{\addvspace {10\p@ }} 28 28 \@writefile{lof}{\addvspace {10\p@ }}
\@writefile{lot}{\addvspace {10\p@ }} 29 29 \@writefile{lot}{\addvspace {10\p@ }}
\@writefile{toc}{\contentsline {section}{\numberline {1.1}Contributions Principales}{4}{section.1.1}\protected@file@percent } 30 30 \@writefile{toc}{\contentsline {section}{\numberline {1.1}Contributions Principales}{4}{section.1.1}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {1.2}Plan de la thèse}{5}{section.1.2}\protected@file@percent } 31 31 \@writefile{toc}{\contentsline {section}{\numberline {1.2}Plan de la thèse}{5}{section.1.2}\protected@file@percent }
\@input{./chapters/contexte2.aux} 32 32 \@input{./chapters/contexte2.aux}
\@writefile{toc}{\contentsline {part}{II\hspace {1em}État de l'art}{21}{part.2}\protected@file@percent } 33 33 \@writefile{toc}{\contentsline {part}{II\hspace {1em}État de l'art}{21}{part.2}\protected@file@percent }
\@input{./chapters/EIAH.aux} 34 34 \@input{./chapters/EIAH.aux}
\@input{./chapters/CBR.aux} 35 35 \@input{./chapters/CBR.aux}
\@writefile{toc}{\contentsline {part}{III\hspace {1em}Contributions}{37}{part.3}\protected@file@percent } 36 36 \@writefile{toc}{\contentsline {part}{III\hspace {1em}Contributions}{37}{part.3}\protected@file@percent }
\@input{./chapters/Architecture.aux} 37 37 \@input{./chapters/Architecture.aux}
\@input{./chapters/TS.aux} 38 38 \@input{./chapters/TS.aux}
\bibstyle{apalike} 39 39 \bibstyle{apalike}
\bibdata{main.bib} 40 40 \bibdata{main.bib}
\bibcite{doi:10.3233/AIC-1994-7104}{Aamodt and Plaza, 1994} 41 41 \bibcite{doi:10.3233/AIC-1994-7104}{Aamodt and Plaza, 1994}
\bibcite{ALABDULRAHMAN2021114061}{Alabdulrahman and Viktor, 2021} 42 42 \bibcite{ALABDULRAHMAN2021114061}{Alabdulrahman and Viktor, 2021}
\bibcite{Arthurs}{Arthurs et~al., 2019} 43 43 \bibcite{Arthurs}{Arthurs et~al., 2019}
\bibcite{Auer}{Auer et~al., 2021} 44 44 \bibcite{Auer}{Auer et~al., 2021}
\bibcite{10.1007/978-3-642-15973-2_50}{Butdee and Tichkiewitch, 2011} 45 45 \bibcite{10.1007/978-3-642-15973-2_50}{Butdee and Tichkiewitch, 2011}
\bibcite{CHIU2023100118}{Chiu et~al., 2023} 46 46 \bibcite{CHIU2023100118}{Chiu et~al., 2023}
\bibcite{Riesbeck1989}{C.K. and R.C., 1989} 47 47 \bibcite{Riesbeck1989}{C.K. and R.C., 1989}
\bibcite{10.1145/3459665}{Cunningham and Delany, 2021} 48 48 \bibcite{10.1145/3459665}{Cunningham and Delany, 2021}
\bibcite{EZALDEEN2022100700}{Ezaldeen et~al., 2022} 49 49 \bibcite{EZALDEEN2022100700}{Ezaldeen et~al., 2022}
\bibcite{10.1007/978-3-030-58342-2_5}{Feely et~al., 2020} 50 50 \bibcite{10.1007/978-3-030-58342-2_5}{Feely et~al., 2020}
\bibcite{10.1007/978-3-319-47096-2_11}{Grace et~al., 2016} 51 51 \bibcite{10.1007/978-3-319-47096-2_11}{Grace et~al., 2016}
\bibcite{9434422}{Gupta et~al., 2021} 52 52 \bibcite{9434422}{Gupta et~al., 2021}
\bibcite{hajduk2019cognitive}{Hajduk et~al., 2019} 53 53 \bibcite{hajduk2019cognitive}{Hajduk et~al., 2019}
\bibcite{doi:10.1177/1754337116651013}{Henriet et~al., 2017} 54 54 \bibcite{doi:10.1177/1754337116651013}{Henriet et~al., 2017}
\bibcite{10.1007/978-3-030-01081-2_9}{Henriet and Greffier, 2018} 55 55 \bibcite{10.1007/978-3-030-01081-2_9}{Henriet and Greffier, 2018}
\bibcite{Hoang}{Hoang, 2018} 56 56 \bibcite{Hoang}{Hoang, 2018}
\bibcite{HU2025127130}{Hu et~al., 2025} 57 57 \bibcite{HU2025127130}{Hu et~al., 2025}
\bibcite{HUANG2023104684}{Huang et~al., 2023} 58 58 \bibcite{HUANG2023104684}{Huang et~al., 2023}
\bibcite{INGKAVARA2022100086}{Ingkavara et~al., 2022} 59 59 \bibcite{INGKAVARA2022100086}{Ingkavara et~al., 2022}
\bibcite{Daubias2011}{Jean-Daubias, 2011} 60 60 \bibcite{Daubias2011}{Jean-Daubias, 2011}
\bibcite{JUNG20095695}{Jung et~al., 2009} 61 61 \bibcite{JUNG20095695}{Jung et~al., 2009}
\bibcite{KOLODNER1983281}{Kolodner, 1983} 62 62 \bibcite{KOLODNER1983281}{Kolodner, 1983}
\bibcite{LALITHA2020583}{Lalitha and Sreeja, 2020} 63 63 \bibcite{LALITHA2020583}{Lalitha and Sreeja, 2020}
\bibcite{min8100434}{Leikola et~al., 2018} 64 64 \bibcite{min8100434}{Leikola et~al., 2018}
\bibcite{10.1007/978-3-030-58342-2_20}{Lepage et~al., 2020} 65 65 \bibcite{10.1007/978-3-030-58342-2_20}{Lepage et~al., 2020}
\bibcite{9870279}{Lin, 2022} 66 66 \bibcite{9870279}{Lin, 2022}
\bibcite{10.1007/978-3-319-61030-6_1}{Maher and Grace, 2017} 67 67 \bibcite{10.1007/978-3-319-61030-6_1}{Maher and Grace, 2017}
\bibcite{MUANGPRATHUB2020e05227}{Muangprathub et~al., 2020} 68 68 \bibcite{MUANGPRATHUB2020e05227}{Muangprathub et~al., 2020}
\bibcite{Muller}{Müller and Bergmann, 2015} 69 69 \bibcite{Muller}{Müller and Bergmann, 2015}
\bibcite{Nkambou}{Nkambou et~al., 2010} 70 70 \bibcite{Nkambou}{Nkambou et~al., 2010}
\bibcite{Obeid}{Obeid et~al., 2022} 71 71 \bibcite{Obeid}{Obeid et~al., 2022}
\bibcite{10.1007/978-3-319-24586-7_20}{Onta{\~{n}}{\'o}n et~al., 2015} 72 72 \bibcite{10.1007/978-3-319-24586-7_20}{Onta{\~{n}}{\'o}n et~al., 2015}
\bibcite{PAREJASLLANOVARCED2024111469}{Parejas-Llanovarced et~al., 2024} 73 73 \bibcite{PAREJASLLANOVARCED2024111469}{Parejas-Llanovarced et~al., 2024}
\bibcite{PETROVIC201617}{Petrovic et~al., 2016} 74 74 \bibcite{PETROVIC201617}{Petrovic et~al., 2016}
\bibcite{Richter2013}{Richter and Weber, 2013} 75 75 \bibcite{Richter2013}{Richter and Weber, 2013}
\bibcite{RICHTER20093}{Richter, 2009} 76 76 \bibcite{RICHTER20093}{Richter, 2009}
\bibcite{Robertson2014ARO}{Robertson and Watson, 2014} 77 77 \bibcite{Robertson2014ARO}{Robertson and Watson, 2014}
\bibcite{ROLDANREYES20151}{{Roldan Reyes} et~al., 2015} 78 78 \bibcite{ROLDANREYES20151}{{Roldan Reyes} et~al., 2015}
\bibcite{schank+abelson77}{Schank and Abelson, 1977} 79 79 \bibcite{schank+abelson77}{Schank and Abelson, 1977}
\bibcite{pmlr-v108-seznec20a}{Seznec et~al., 2020} 80 80 \bibcite{pmlr-v108-seznec20a}{Seznec et~al., 2020}
\bibcite{9072123}{Sinaga and Yang, 2020} 81 81 \bibcite{9072123}{Sinaga and Yang, 2020}
\bibcite{10.1007/978-3-030-01081-2_25}{Smyth and Cunningham, 2018} 82 82 \bibcite{10.1007/978-3-030-01081-2_25}{Smyth and Cunningham, 2018}
\bibcite{10.1007/978-3-030-58342-2_8}{Smyth and Willemsen, 2020} 83 83 \bibcite{10.1007/978-3-030-58342-2_8}{Smyth and Willemsen, 2020}
\bibcite{Soto2}{Soto-Forero et~al., 2024a} 84 84 \bibcite{Soto2}{Soto-Forero et~al., 2024a}
\bibcite{10.1007/978-3-031-63646-2_13}{Soto-Forero et~al., 2024b} 85 85 \bibcite{10.1007/978-3-031-63646-2_13}{Soto-Forero et~al., 2024b}
\bibcite{SU2022109547}{Su et~al., 2022} 86 86 \bibcite{SU2022109547}{Su et~al., 2022}
\bibcite{8495930}{Supic, 2018} 87 87 \bibcite{8495930}{Supic, 2018}
88 \bibcite{buildings13030651}{Uysal and Sonmez, 2023}
\bibcite{WANG2021331}{Wang et~al., 2021} 88 89 \bibcite{WANG2021331}{Wang et~al., 2021}
\bibcite{wolf2024keep}{Wolf et~al., 2024} 89 90 \bibcite{wolf2024keep}{Wolf et~al., 2024}
\bibcite{9627973}{Xu et~al., 2021} 90 91 \bibcite{9627973}{Xu et~al., 2021}
92 \bibcite{YU2023110163}{Yu and Li, 2023}
\bibcite{ZHANG2021100025}{Zhang. and Aslan, 2021} 91 93 \bibcite{ZHANG2021100025}{Zhang. and Aslan, 2021}
\bibcite{ZHAO2023118535}{Zhao et~al., 2023} 92 94 \bibcite{ZHAO2023118535}{Zhao et~al., 2023}
\bibcite{Zhou2021}{Zhou and Wang, 2021} 93 95 \bibcite{Zhou2021}{Zhou and Wang, 2021}
\bibcite{jmse10040464}{Zuluaga et~al., 2022} 94 96 \bibcite{jmse10040464}{Zuluaga et~al., 2022}
\gdef \@abspage@last{72} 95 97 \gdef \@abspage@last{72}
\begin{thebibliography}{} 1 1 \begin{thebibliography}{}
2 2
\bibitem[Aamodt and Plaza, 1994]{doi:10.3233/AIC-1994-7104} 3 3 \bibitem[Aamodt and Plaza, 1994]{doi:10.3233/AIC-1994-7104}
Aamodt, A. and Plaza, E. (1994). 4 4 Aamodt, A. and Plaza, E. (1994).
\newblock Case-based reasoning: Foundational issues, methodological variations, 5 5 \newblock Case-based reasoning: Foundational issues, methodological variations,
and system approaches. 6 6 and system approaches.
\newblock {\em AI Communications}, 7(1):39--59. 7 7 \newblock {\em AI Communications}, 7(1):39--59.
8 8
\bibitem[Alabdulrahman and Viktor, 2021]{ALABDULRAHMAN2021114061} 9 9 \bibitem[Alabdulrahman and Viktor, 2021]{ALABDULRAHMAN2021114061}
Alabdulrahman, R. and Viktor, H. (2021). 10 10 Alabdulrahman, R. and Viktor, H. (2021).
\newblock Catering for unique tastes: Targeting grey-sheep users recommender 11 11 \newblock Catering for unique tastes: Targeting grey-sheep users recommender
systems through one-class machine learning. 12 12 systems through one-class machine learning.
\newblock {\em Expert Systems with Applications}, 166:114061. 13 13 \newblock {\em Expert Systems with Applications}, 166:114061.
14 14
\bibitem[Arthurs et~al., 2019]{Arthurs} 15 15 \bibitem[Arthurs et~al., 2019]{Arthurs}
Arthurs, N., Stenhaug, B., Karayev, S., and Piech, C. (2019). 16 16 Arthurs, N., Stenhaug, B., Karayev, S., and Piech, C. (2019).
\newblock Grades are not normal: Improving exam score models using the 17 17 \newblock Grades are not normal: Improving exam score models using the
logit-normal distribution. 18 18 logit-normal distribution.
\newblock In {\em International Conference on Educational Data Mining (EDM)}, 19 19 \newblock In {\em International Conference on Educational Data Mining (EDM)},
page~6. 20 20 page~6.
21 21
\bibitem[Auer et~al., 2021]{Auer} 22 22 \bibitem[Auer et~al., 2021]{Auer}
Auer, F., Lenarduzzi, V., Felderer, M., and Taibi, D. (2021). 23 23 Auer, F., Lenarduzzi, V., Felderer, M., and Taibi, D. (2021).
\newblock From monolithic systems to microservices: An assessment framework. 24 24 \newblock From monolithic systems to microservices: An assessment framework.
\newblock {\em Information and Software Technology}, 137:106600. 25 25 \newblock {\em Information and Software Technology}, 137:106600.
26 26
\bibitem[Butdee and Tichkiewitch, 2011]{10.1007/978-3-642-15973-2_50} 27 27 \bibitem[Butdee and Tichkiewitch, 2011]{10.1007/978-3-642-15973-2_50}
Butdee, S. and Tichkiewitch, S. (2011). 28 28 Butdee, S. and Tichkiewitch, S. (2011).
\newblock Case-based reasoning for adaptive aluminum extrusion die design 29 29 \newblock Case-based reasoning for adaptive aluminum extrusion die design
together with parameters by neural networks. 30 30 together with parameters by neural networks.
\newblock In Bernard, A., editor, {\em Global Product Development}, pages 31 31 \newblock In Bernard, A., editor, {\em Global Product Development}, pages
491--496, Berlin, Heidelberg. Springer Berlin Heidelberg. 32 32 491--496, Berlin, Heidelberg. Springer Berlin Heidelberg.
33 33
\bibitem[Chiu et~al., 2023]{CHIU2023100118} 34 34 \bibitem[Chiu et~al., 2023]{CHIU2023100118}
Chiu, T.~K., Xia, Q., Zhou, X., Chai, C.~S., and Cheng, M. (2023). 35 35 Chiu, T.~K., Xia, Q., Zhou, X., Chai, C.~S., and Cheng, M. (2023).
\newblock Systematic literature review on opportunities, challenges, and future 36 36 \newblock Systematic literature review on opportunities, challenges, and future
research recommendations of artificial intelligence in education. 37 37 research recommendations of artificial intelligence in education.
\newblock {\em Computers and Education: Artificial Intelligence}, 4:100118. 38 38 \newblock {\em Computers and Education: Artificial Intelligence}, 4:100118.
39 39
\bibitem[C.K. and R.C., 1989]{Riesbeck1989} 40 40 \bibitem[C.K. and R.C., 1989]{Riesbeck1989}
C.K., R. and R.C., S. (1989). 41 41 C.K., R. and R.C., S. (1989).
\newblock {\em Inside Case-Based Reasoning}. 42 42 \newblock {\em Inside Case-Based Reasoning}.
\newblock Psychology Press. 43 43 \newblock Psychology Press.
44 44
\bibitem[Cunningham and Delany, 2021]{10.1145/3459665} 45 45 \bibitem[Cunningham and Delany, 2021]{10.1145/3459665}
Cunningham, P. and Delany, S.~J. (2021). 46 46 Cunningham, P. and Delany, S.~J. (2021).
\newblock K-nearest neighbour classifiers - a tutorial. 47 47 \newblock K-nearest neighbour classifiers - a tutorial.
\newblock {\em ACM Comput. Surv.}, 54(6). 48 48 \newblock {\em ACM Comput. Surv.}, 54(6).
49 49
\bibitem[Ezaldeen et~al., 2022]{EZALDEEN2022100700} 50 50 \bibitem[Ezaldeen et~al., 2022]{EZALDEEN2022100700}
Ezaldeen, H., Misra, R., Bisoy, S.~K., Alatrash, R., and Priyadarshini, R. 51 51 Ezaldeen, H., Misra, R., Bisoy, S.~K., Alatrash, R., and Priyadarshini, R.
(2022). 52 52 (2022).
\newblock A hybrid e-learning recommendation integrating adaptive profiling and 53 53 \newblock A hybrid e-learning recommendation integrating adaptive profiling and
sentiment analysis. 54 54 sentiment analysis.
\newblock {\em Journal of Web Semantics}, 72:100700. 55 55 \newblock {\em Journal of Web Semantics}, 72:100700.
56 56
\bibitem[Feely et~al., 2020]{10.1007/978-3-030-58342-2_5} 57 57 \bibitem[Feely et~al., 2020]{10.1007/978-3-030-58342-2_5}
Feely, C., Caulfield, B., Lawlor, A., and Smyth, B. (2020). 58 58 Feely, C., Caulfield, B., Lawlor, A., and Smyth, B. (2020).
\newblock Using case-based reasoning to predict marathon performance and 59 59 \newblock Using case-based reasoning to predict marathon performance and
recommend tailored training plans. 60 60 recommend tailored training plans.
\newblock In Watson, I. and Weber, R., editors, {\em Case-Based Reasoning 61 61 \newblock In Watson, I. and Weber, R., editors, {\em Case-Based Reasoning
Research and Development}, pages 67--81, Cham. Springer International 62 62 Research and Development}, pages 67--81, Cham. Springer International
Publishing. 63 63 Publishing.
64 64
\bibitem[Grace et~al., 2016]{10.1007/978-3-319-47096-2_11} 65 65 \bibitem[Grace et~al., 2016]{10.1007/978-3-319-47096-2_11}
Grace, K., Maher, M.~L., Wilson, D.~C., and Najjar, N.~A. (2016). 66 66 Grace, K., Maher, M.~L., Wilson, D.~C., and Najjar, N.~A. (2016).
\newblock Combining cbr and deep learning to generate surprising recipe 67 67 \newblock Combining cbr and deep learning to generate surprising recipe
designs. 68 68 designs.
\newblock In Goel, A., D{\'i}az-Agudo, M.~B., and Roth-Berghofer, T., editors, 69 69 \newblock In Goel, A., D{\'i}az-Agudo, M.~B., and Roth-Berghofer, T., editors,
{\em Case-Based Reasoning Research and Development}, pages 154--169, Cham. 70 70 {\em Case-Based Reasoning Research and Development}, pages 154--169, Cham.
Springer International Publishing. 71 71 Springer International Publishing.
72 72
\bibitem[Gupta et~al., 2021]{9434422} 73 73 \bibitem[Gupta et~al., 2021]{9434422}
Gupta, S., Chaudhari, S., Joshi, G., and Yağan, O. (2021). 74 74 Gupta, S., Chaudhari, S., Joshi, G., and Yağan, O. (2021).
\newblock Multi-armed bandits with correlated arms. 75 75 \newblock Multi-armed bandits with correlated arms.
\newblock {\em IEEE Transactions on Information Theory}, 67(10):6711--6732. 76 76 \newblock {\em IEEE Transactions on Information Theory}, 67(10):6711--6732.
77 77
\bibitem[Hajduk et~al., 2019]{hajduk2019cognitive} 78 78 \bibitem[Hajduk et~al., 2019]{hajduk2019cognitive}
Hajduk, M., Sukop, M., and Haun, M. (2019). 79 79 Hajduk, M., Sukop, M., and Haun, M. (2019).
\newblock {\em Cognitive Multi-agent Systems: Structures, Strategies and 80 80 \newblock {\em Cognitive Multi-agent Systems: Structures, Strategies and
Applications to Mobile Robotics and Robosoccer}. 81 81 Applications to Mobile Robotics and Robosoccer}.
\newblock Studies in Systems, Decision and Control. Springer International 82 82 \newblock Studies in Systems, Decision and Control. Springer International
Publishing. 83 83 Publishing.
84 84
\bibitem[Henriet et~al., 2017]{doi:10.1177/1754337116651013} 85 85 \bibitem[Henriet et~al., 2017]{doi:10.1177/1754337116651013}
Henriet, J., Christophe, L., and Laurent, P. (2017). 86 86 Henriet, J., Christophe, L., and Laurent, P. (2017).
\newblock Artificial intelligence-virtual trainer: An educative system based on 87 87 \newblock Artificial intelligence-virtual trainer: An educative system based on
artificial intelligence and designed to produce varied and consistent 88 88 artificial intelligence and designed to produce varied and consistent
training lessons. 89 89 training lessons.
\newblock {\em Proceedings of the Institution of Mechanical Engineers, Part P: 90 90 \newblock {\em Proceedings of the Institution of Mechanical Engineers, Part P:
Journal of Sports Engineering and Technology}, 231(2):110--124. 91 91 Journal of Sports Engineering and Technology}, 231(2):110--124.
92 92
\bibitem[Henriet and Greffier, 2018]{10.1007/978-3-030-01081-2_9} 93 93 \bibitem[Henriet and Greffier, 2018]{10.1007/978-3-030-01081-2_9}
Henriet, J. and Greffier, F. (2018). 94 94 Henriet, J. and Greffier, F. (2018).
\newblock {AI-VT}: An example of {CBR} that generates a variety of solutions to the 95 95 \newblock {AI-VT}: An example of {CBR} that generates a variety of solutions to the
same problem. 96 96 same problem.
\newblock In Cox, M.~T., Funk, P., and Begum, S., editors, {\em Case-Based 97 97 \newblock In Cox, M.~T., Funk, P., and Begum, S., editors, {\em Case-Based
Reasoning Research and Development}, pages 124--139, Cham. Springer 98 98 Reasoning Research and Development}, pages 124--139, Cham. Springer
International Publishing. 99 99 International Publishing.
100 100
\bibitem[Hoang, 2018]{Hoang} 101 101 \bibitem[Hoang, 2018]{Hoang}
Hoang, L. (2018). 102 102 Hoang, L. (2018).
\newblock {\em La formule du savoir. Une philosophie unifiée du savoir fondée 103 103 \newblock {\em La formule du savoir. Une philosophie unifiée du savoir fondée
sur le théorème de Bayes}. 104 104 sur le théorème de Bayes}.
\newblock EDP Sciences. 105 105 \newblock EDP Sciences.
106 106
\bibitem[Hu et~al., 2025]{HU2025127130} 107 107 \bibitem[Hu et~al., 2025]{HU2025127130}
Hu, B., Ma, Y., Liu, Z., and Wang, H. (2025). 108 108 Hu, B., Ma, Y., Liu, Z., and Wang, H. (2025).
\newblock A social importance and category enhanced cold-start user 109 109 \newblock A social importance and category enhanced cold-start user
recommendation system. 110 110 recommendation system.
\newblock {\em Expert Systems with Applications}, 277:127130. 111 111 \newblock {\em Expert Systems with Applications}, 277:127130.
112 112
\bibitem[Huang et~al., 2023]{HUANG2023104684} 113 113 \bibitem[Huang et~al., 2023]{HUANG2023104684}
Huang, A.~Y., Lu, O.~H., and Yang, S.~J. (2023). 114 114 Huang, A.~Y., Lu, O.~H., and Yang, S.~J. (2023).
\newblock Effects of artificial intelligence–enabled personalized 115 115 \newblock Effects of artificial intelligence–enabled personalized
recommendations on learners’ learning engagement, motivation, and outcomes 116 116 recommendations on learners’ learning engagement, motivation, and outcomes
in a flipped classroom. 117 117 in a flipped classroom.
\newblock {\em Computers and Education}, 194:104684. 118 118 \newblock {\em Computers and Education}, 194:104684.
119 119
\bibitem[Ingkavara et~al., 2022]{INGKAVARA2022100086} 120 120 \bibitem[Ingkavara et~al., 2022]{INGKAVARA2022100086}
Ingkavara, T., Panjaburee, P., Srisawasdi, N., and Sajjapanroj, S. (2022). 121 121 Ingkavara, T., Panjaburee, P., Srisawasdi, N., and Sajjapanroj, S. (2022).
\newblock The use of a personalized learning approach to implementing 122 122 \newblock The use of a personalized learning approach to implementing
self-regulated online learning. 123 123 self-regulated online learning.
\newblock {\em Computers and Education: Artificial Intelligence}, 3:100086. 124 124 \newblock {\em Computers and Education: Artificial Intelligence}, 3:100086.
125 125
\bibitem[Jean-Daubias, 2011]{Daubias2011} 126 126 \bibitem[Jean-Daubias, 2011]{Daubias2011}
Jean-Daubias, S. (2011). 127 127 Jean-Daubias, S. (2011).
\newblock Ingénierie des profils d'apprenants. 128 128 \newblock Ingénierie des profils d'apprenants.
129 129
\bibitem[Jung et~al., 2009]{JUNG20095695} 130 130 \bibitem[Jung et~al., 2009]{JUNG20095695}
Jung, S., Lim, T., and Kim, D. (2009). 131 131 Jung, S., Lim, T., and Kim, D. (2009).
\newblock Integrating radial basis function networks with case-based reasoning 132 132 \newblock Integrating radial basis function networks with case-based reasoning
for product design. 133 133 for product design.
\newblock {\em Expert Systems with Applications}, 36(3, Part 1):5695--5701. 134 134 \newblock {\em Expert Systems with Applications}, 36(3, Part 1):5695--5701.
135 135
\bibitem[Kolodner, 1983]{KOLODNER1983281} 136 136 \bibitem[Kolodner, 1983]{KOLODNER1983281}
Kolodner, J.~L. (1983). 137 137 Kolodner, J.~L. (1983).
\newblock Reconstructive memory: A computer model. 138 138 \newblock Reconstructive memory: A computer model.
\newblock {\em Cognitive Science}, 7(4):281--328. 139 139 \newblock {\em Cognitive Science}, 7(4):281--328.
140 140
\bibitem[Lalitha and Sreeja, 2020]{LALITHA2020583} 141 141 \bibitem[Lalitha and Sreeja, 2020]{LALITHA2020583}
Lalitha, T.~B. and Sreeja, P.~S. (2020). 142 142 Lalitha, T.~B. and Sreeja, P.~S. (2020).
\newblock Personalised self-directed learning recommendation system. 143 143 \newblock Personalised self-directed learning recommendation system.
\newblock {\em Procedia Computer Science}, 171:583--592. 144 144 \newblock {\em Procedia Computer Science}, 171:583--592.
\newblock Third International Conference on Computing and Network 145 145 \newblock Third International Conference on Computing and Network
Communications (CoCoNet'19). 146 146 Communications (CoCoNet'19).
147 147
\bibitem[Leikola et~al., 2018]{min8100434} 148 148 \bibitem[Leikola et~al., 2018]{min8100434}
Leikola, M., Sauer, C., Rintala, L., Aromaa, J., and Lundström, M. (2018). 149 149 Leikola, M., Sauer, C., Rintala, L., Aromaa, J., and Lundström, M. (2018).
\newblock Assessing the similarity of cyanide-free gold leaching processes: A 150 150 \newblock Assessing the similarity of cyanide-free gold leaching processes: A
case-based reasoning application. 151 151 case-based reasoning application.
\newblock {\em Minerals}, 8(10). 152 152 \newblock {\em Minerals}, 8(10).
153 153
\bibitem[Lepage et~al., 2020]{10.1007/978-3-030-58342-2_20} 154 154 \bibitem[Lepage et~al., 2020]{10.1007/978-3-030-58342-2_20}
Lepage, Y., Lieber, J., Mornard, I., Nauer, E., Romary, J., and Sies, R. 155 155 Lepage, Y., Lieber, J., Mornard, I., Nauer, E., Romary, J., and Sies, R.
(2020). 156 156 (2020).
\newblock The {F}rench correction: When retrieval is harder to specify than 157 157 \newblock The {F}rench correction: When retrieval is harder to specify than
adaptation. 158 158 adaptation.
\newblock In Watson, I. and Weber, R., editors, {\em Case-Based Reasoning 159 159 \newblock In Watson, I. and Weber, R., editors, {\em Case-Based Reasoning
Research and Development}, pages 309--324, Cham. Springer International 160 160 Research and Development}, pages 309--324, Cham. Springer International
Publishing. 161 161 Publishing.
162 162
\bibitem[Lin, 2022]{9870279} 163 163 \bibitem[Lin, 2022]{9870279}
Lin, B. (2022). 164 164 Lin, B. (2022).
\newblock Evolutionary multi-armed bandits with genetic {T}hompson sampling. 165 165 \newblock Evolutionary multi-armed bandits with genetic {T}hompson sampling.
\newblock In {\em 2022 IEEE Congress on Evolutionary Computation (CEC)}, pages 166 166 \newblock In {\em 2022 IEEE Congress on Evolutionary Computation (CEC)}, pages
1--8. 167 167 1--8.
168 168
\bibitem[Maher and Grace, 2017]{10.1007/978-3-319-61030-6_1} 169 169 \bibitem[Maher and Grace, 2017]{10.1007/978-3-319-61030-6_1}
Maher, M.~L. and Grace, K. (2017). 170 170 Maher, M.~L. and Grace, K. (2017).
\newblock Encouraging curiosity in case-based reasoning and recommender 171 171 \newblock Encouraging curiosity in case-based reasoning and recommender
systems. 172 172 systems.
\newblock In Aha, D.~W. and Lieber, J., editors, {\em Case-Based Reasoning 173 173 \newblock In Aha, D.~W. and Lieber, J., editors, {\em Case-Based Reasoning
Research and Development}, pages 3--15, Cham. Springer International 174 174 Research and Development}, pages 3--15, Cham. Springer International
Publishing. 175 175 Publishing.
176 176
\bibitem[Muangprathub et~al., 2020]{MUANGPRATHUB2020e05227} 177 177 \bibitem[Muangprathub et~al., 2020]{MUANGPRATHUB2020e05227}
Muangprathub, J., Boonjing, V., and Chamnongthai, K. (2020). 178 178 Muangprathub, J., Boonjing, V., and Chamnongthai, K. (2020).
\newblock Learning recommendation with formal concept analysis for intelligent 179 179 \newblock Learning recommendation with formal concept analysis for intelligent
tutoring system. 180 180 tutoring system.
\newblock {\em Heliyon}, 6(10):e05227. 181 181 \newblock {\em Heliyon}, 6(10):e05227.
182 182
\bibitem[Müller and Bergmann, 2015]{Muller} 183 183 \bibitem[Müller and Bergmann, 2015]{Muller}
Müller, G. and Bergmann, R. (2015). 184 184 Müller, G. and Bergmann, R. (2015).
\newblock Cookingcake: A framework for the adaptation of cooking recipes 185 185 \newblock Cookingcake: A framework for the adaptation of cooking recipes
represented as workflows. 186 186 represented as workflows.
\newblock In {\em International Conference on Case-Based Reasoning}. 187 187 \newblock In {\em International Conference on Case-Based Reasoning}.
188 188
\bibitem[Nkambou et~al., 2010]{Nkambou} 189 189 \bibitem[Nkambou et~al., 2010]{Nkambou}
Nkambou, R., Bourdeau, J., and Mizoguchi, R. (2010). 190 190 Nkambou, R., Bourdeau, J., and Mizoguchi, R. (2010).
\newblock {\em Advances in Intelligent Tutoring Systems}. 191 191 \newblock {\em Advances in Intelligent Tutoring Systems}.
\newblock Springer Berlin, Heidelberg, 1 edition. 192 192 \newblock Springer Berlin, Heidelberg, 1 edition.
193 193
\bibitem[Obeid et~al., 2022]{Obeid} 194 194 \bibitem[Obeid et~al., 2022]{Obeid}
Obeid, C., Lahoud, C., Khoury, H.~E., and Champin, P. (2022). 195 195 Obeid, C., Lahoud, C., Khoury, H.~E., and Champin, P. (2022).
\newblock A novel hybrid recommender system approach for student academic 196 196 \newblock A novel hybrid recommender system approach for student academic
advising named cohrs, supported by case-based reasoning and ontology. 197 197 advising named cohrs, supported by case-based reasoning and ontology.
\newblock {\em Computer Science and Information Systems}, 19(2):979–1005. 198 198 \newblock {\em Computer Science and Information Systems}, 19(2):979–1005.
199 199
\bibitem[Onta{\~{n}}{\'o}n et~al., 2015]{10.1007/978-3-319-24586-7_20} 200 200 \bibitem[Onta{\~{n}}{\'o}n et~al., 2015]{10.1007/978-3-319-24586-7_20}
Onta{\~{n}}{\'o}n, S., Plaza, E., and Zhu, J. (2015). 201 201 Onta{\~{n}}{\'o}n, S., Plaza, E., and Zhu, J. (2015).
\newblock Argument-based case revision in {CBR} for story generation. 202 202 \newblock Argument-based case revision in {CBR} for story generation.
\newblock In H{\"u}llermeier, E. and Minor, M., editors, {\em Case-Based 203 203 \newblock In H{\"u}llermeier, E. and Minor, M., editors, {\em Case-Based
Reasoning Research and Development}, pages 290--305, Cham. Springer 204 204 Reasoning Research and Development}, pages 290--305, Cham. Springer
International Publishing. 205 205 International Publishing.
206 206
\bibitem[Parejas-Llanovarced et~al., 2024]{PAREJASLLANOVARCED2024111469} 207 207 \bibitem[Parejas-Llanovarced et~al., 2024]{PAREJASLLANOVARCED2024111469}
Parejas-Llanovarced, H., Caro-Martínez, M., del Castillo, M. G.~O., and 208 208 Parejas-Llanovarced, H., Caro-Martínez, M., del Castillo, M. G.~O., and
Recio-García, J.~A. (2024). 209 209 Recio-García, J.~A. (2024).
\newblock Case-based selection of explanation methods for neural network image 210 210 \newblock Case-based selection of explanation methods for neural network image
classifiers. 211 211 classifiers.
\newblock {\em Knowledge-Based Systems}, 288:111469. 212 212 \newblock {\em Knowledge-Based Systems}, 288:111469.
213 213
\bibitem[Petrovic et~al., 2016]{PETROVIC201617} 214 214 \bibitem[Petrovic et~al., 2016]{PETROVIC201617}
Petrovic, S., Khussainova, G., and Jagannathan, R. (2016). 215 215 Petrovic, S., Khussainova, G., and Jagannathan, R. (2016).
\newblock Knowledge-light adaptation approaches in case-based reasoning for 216 216 \newblock Knowledge-light adaptation approaches in case-based reasoning for
radiotherapy treatment planning. 217 217 radiotherapy treatment planning.
\newblock {\em Artificial Intelligence in Medicine}, 68:17--28. 218 218 \newblock {\em Artificial Intelligence in Medicine}, 68:17--28.
219 219
\bibitem[Richter and Weber, 2013]{Richter2013} 220 220 \bibitem[Richter and Weber, 2013]{Richter2013}
Richter, M. and Weber, R. (2013). 221 221 Richter, M. and Weber, R. (2013).
\newblock {\em Case-Based Reasoning (A Textbook)}. 222 222 \newblock {\em Case-Based Reasoning (A Textbook)}.
\newblock Springer-Verlag GmbH. 223 223 \newblock Springer-Verlag GmbH.
224 224
\bibitem[Richter, 2009]{RICHTER20093} 225 225 \bibitem[Richter, 2009]{RICHTER20093}
Richter, M.~M. (2009). 226 226 Richter, M.~M. (2009).
\newblock The search for knowledge, contexts, and case-based reasoning. 227 227 \newblock The search for knowledge, contexts, and case-based reasoning.
\newblock {\em Engineering Applications of Artificial Intelligence}, 228 228 \newblock {\em Engineering Applications of Artificial Intelligence},
22(1):3--9. 229 229 22(1):3--9.
230 230
\bibitem[Robertson and Watson, 2014]{Robertson2014ARO} 231 231 \bibitem[Robertson and Watson, 2014]{Robertson2014ARO}
Robertson, G. and Watson, I.~D. (2014). 232 232 Robertson, G. and Watson, I.~D. (2014).
\newblock A review of real-time strategy game {AI}. 233 233 \newblock A review of real-time strategy game {AI}.
\newblock {\em AI Mag.}, 35:75--104. 234 234 \newblock {\em AI Mag.}, 35:75--104.
235 235
\bibitem[{Roldan Reyes} et~al., 2015]{ROLDANREYES20151} 236 236 \bibitem[{Roldan Reyes} et~al., 2015]{ROLDANREYES20151}
{Roldan Reyes}, E., Negny, S., {Cortes Robles}, G., and {Le Lann}, J. (2015). 237 237 {Roldan Reyes}, E., Negny, S., {Cortes Robles}, G., and {Le Lann}, J. (2015).
\newblock Improvement of online adaptation knowledge acquisition and reuse in 238 238 \newblock Improvement of online adaptation knowledge acquisition and reuse in
case-based reasoning: Application to process engineering design. 239 239 case-based reasoning: Application to process engineering design.
\newblock {\em Engineering Applications of Artificial Intelligence}, 41:1--16. 240 240 \newblock {\em Engineering Applications of Artificial Intelligence}, 41:1--16.
241 241
\bibitem[Schank and Abelson, 1977]{schank+abelson77} 242 242 \bibitem[Schank and Abelson, 1977]{schank+abelson77}
Schank, R.~C. and Abelson, R.~P. (1977). 243 243 Schank, R.~C. and Abelson, R.~P. (1977).
\newblock {\em Scripts, Plans, Goals and Understanding: an Inquiry into Human 244 244 \newblock {\em Scripts, Plans, Goals and Understanding: an Inquiry into Human
Knowledge Structures}. 245 245 Knowledge Structures}.
\newblock L. Erlbaum, Hillsdale, NJ. 246 246 \newblock L. Erlbaum, Hillsdale, NJ.
247 247
\bibitem[Seznec et~al., 2020]{pmlr-v108-seznec20a} 248 248 \bibitem[Seznec et~al., 2020]{pmlr-v108-seznec20a}
Seznec, J., Menard, P., Lazaric, A., and Valko, M. (2020). 249 249 Seznec, J., Menard, P., Lazaric, A., and Valko, M. (2020).
\newblock A single algorithm for both restless and rested rotting bandits. 250 250 \newblock A single algorithm for both restless and rested rotting bandits.
\newblock In Chiappa, S. and Calandra, R., editors, {\em Proceedings of the 251 251 \newblock In Chiappa, S. and Calandra, R., editors, {\em Proceedings of the
Twenty Third International Conference on Artificial Intelligence and 252 252 Twenty Third International Conference on Artificial Intelligence and
Statistics}, volume 108 of {\em Proceedings of Machine Learning Research}, 253 253 Statistics}, volume 108 of {\em Proceedings of Machine Learning Research},
pages 3784--3794. PMLR. 254 254 pages 3784--3794. PMLR.
255 255
\bibitem[Sinaga and Yang, 2020]{9072123} 256 256 \bibitem[Sinaga and Yang, 2020]{9072123}
Sinaga, K.~P. and Yang, M.-S. (2020). 257 257 Sinaga, K.~P. and Yang, M.-S. (2020).
\newblock Unsupervised k-means clustering algorithm. 258 258 \newblock Unsupervised k-means clustering algorithm.
\newblock {\em IEEE Access}, 8:80716--80727. 259 259 \newblock {\em IEEE Access}, 8:80716--80727.
260 260
\bibitem[Smyth and Cunningham, 2018]{10.1007/978-3-030-01081-2_25} 261 261 \bibitem[Smyth and Cunningham, 2018]{10.1007/978-3-030-01081-2_25}
Smyth, B. and Cunningham, P. (2018). 262 262 Smyth, B. and Cunningham, P. (2018).
\newblock An analysis of case representations for marathon race prediction and 263 263 \newblock An analysis of case representations for marathon race prediction and
planning. 264 264 planning.
\newblock In Cox, M.~T., Funk, P., and Begum, S., editors, {\em Case-Based 265 265 \newblock In Cox, M.~T., Funk, P., and Begum, S., editors, {\em Case-Based
Reasoning Research and Development}, pages 369--384, Cham. Springer 266 266 Reasoning Research and Development}, pages 369--384, Cham. Springer
International Publishing. 267 267 International Publishing.
268 268
\bibitem[Smyth and Willemsen, 2020]{10.1007/978-3-030-58342-2_8} 269 269 \bibitem[Smyth and Willemsen, 2020]{10.1007/978-3-030-58342-2_8}
Smyth, B. and Willemsen, M.~C. (2020). 270 270 Smyth, B. and Willemsen, M.~C. (2020).
\newblock Predicting the personal-best times of speed skaters using case-based 271 271 \newblock Predicting the personal-best times of speed skaters using case-based
reasoning. 272 272 reasoning.
\newblock In Watson, I. and Weber, R., editors, {\em Case-Based Reasoning 273 273 \newblock In Watson, I. and Weber, R., editors, {\em Case-Based Reasoning
Research and Development}, pages 112--126, Cham. Springer International 274 274 Research and Development}, pages 112--126, Cham. Springer International
Publishing. 275 275 Publishing.
276 276
\bibitem[Soto-Forero et~al., 2024a]{Soto2} 277 277 \bibitem[Soto-Forero et~al., 2024a]{Soto2}
Soto-Forero, D., Ackermann, S., Betbeder, M.-L., and Henriet, J. (2024a). 278 278 Soto-Forero, D., Ackermann, S., Betbeder, M.-L., and Henriet, J. (2024a).
\newblock Automatic real-time adaptation of training session difficulty using 279 279 \newblock Automatic real-time adaptation of training session difficulty using
rules and reinforcement learning in the {AI-VT} {ITS}. 280 280 rules and reinforcement learning in the {AI-VT} {ITS}.
\newblock {\em International Journal of Modern Education and Computer 281 281 \newblock {\em International Journal of Modern Education and Computer
Science(IJMECS)}, 16:56--71. 282 282 Science(IJMECS)}, 16:56--71.
283 283
\bibitem[Soto-Forero et~al., 2024b]{10.1007/978-3-031-63646-2_13} 284 284 \bibitem[Soto-Forero et~al., 2024b]{10.1007/978-3-031-63646-2_13}
Soto-Forero, D., Ackermann, S., Betbeder, M.-L., and Henriet, J. (2024b). 285 285 Soto-Forero, D., Ackermann, S., Betbeder, M.-L., and Henriet, J. (2024b).
\newblock The intelligent tutoring system {AI-VT} with case-based reasoning and 286 286 \newblock The intelligent tutoring system {AI-VT} with case-based reasoning and
real time recommender models. 287 287 real time recommender models.
\newblock In Recio-Garcia, J.~A., Orozco-del Castillo, M.~G., and Bridge, D., 288 288 \newblock In Recio-Garcia, J.~A., Orozco-del Castillo, M.~G., and Bridge, D.,
editors, {\em Case-Based Reasoning Research and Development}, pages 191--205, 289 289 editors, {\em Case-Based Reasoning Research and Development}, pages 191--205,
Cham. Springer Nature Switzerland. 290 290 Cham. Springer Nature Switzerland.
291 291
\bibitem[Su et~al., 2022]{SU2022109547} 292 292 \bibitem[Su et~al., 2022]{SU2022109547}
Su, Y., Cheng, Z., Wu, J., Dong, Y., Huang, Z., Wu, L., Chen, E., Wang, S., and 293 293 Su, Y., Cheng, Z., Wu, J., Dong, Y., Huang, Z., Wu, L., Chen, E., Wang, S., and
Xie, F. (2022). 294 294 Xie, F. (2022).
\newblock Graph-based cognitive diagnosis for intelligent tutoring systems. 295 295 \newblock Graph-based cognitive diagnosis for intelligent tutoring systems.
\newblock {\em Knowledge-Based Systems}, 253:109547. 296 296 \newblock {\em Knowledge-Based Systems}, 253:109547.
297 297
\bibitem[Supic, 2018]{8495930} 298 298 \bibitem[Supic, 2018]{8495930}
Supic, H. (2018). 299 299 Supic, H. (2018).
\newblock Case-based reasoning model for personalized learning path 300 300 \newblock Case-based reasoning model for personalized learning path
recommendation in example-based learning activities. 301 301 recommendation in example-based learning activities.
\newblock In {\em 2018 IEEE 27th International Conference on Enabling 302 302 \newblock In {\em 2018 IEEE 27th International Conference on Enabling
Technologies: Infrastructure for Collaborative Enterprises (WETICE)}, pages 303 303 Technologies: Infrastructure for Collaborative Enterprises (WETICE)}, pages
175--178. 304 304 175--178.
305 305
306 \bibitem[Uysal and Sonmez, 2023]{buildings13030651}
@article{ZHANG2021100025, 1 1 @article{ZHANG2021100025,
title = {AI technologies for education: Recent research and future directions}, 2 2 title = {AI technologies for education: Recent research and future directions},
journal = {Computers and Education: Artificial Intelligence}, 3 3 journal = {Computers and Education: Artificial Intelligence},
volume = {2}, 4 4 volume = {2},
pages = {100025}, 5 5 pages = {100025},
language = {English}, 6 6 language = {English},
year = {2021}, 7 7 year = {2021},
issn = {2666-920X}, 8 8 issn = {2666-920X},
type = {article}, 9 9 type = {article},
doi = {10.1016/j.caeai.2021.100025}, 10 10 doi = {10.1016/j.caeai.2021.100025},
url = {https://www.sciencedirect.com/science/article/pii/S2666920X21000199}, 11 11 url = {https://www.sciencedirect.com/science/article/pii/S2666920X21000199},
author = {Ke Zhang and Ayse Begum Aslan}, 12 12 author = {Ke Zhang and Ayse Begum Aslan},
address={USA}, 13 13 address={USA},
affiliation={Wayne State University; Eastern Michigan University}, 14 14 affiliation={Wayne State University; Eastern Michigan University},
keywords = {Artificial intelligence, AI, AI in Education}, 15 15 keywords = {Artificial intelligence, AI, AI in Education},
abstract = {From unique educational perspectives, this article reports a comprehensive review of selected empirical studies on artificial intelligence in education (AIEd) published in 1993–2020, as collected in the Web of Sciences database and selected AIEd-specialized journals. A total of 40 empirical studies met all selection criteria, and were fully reviewed using multiple methods, including selected bibliometrics, content analysis and categorical meta-trends analysis. This article reports the current state of AIEd research, highlights selected AIEd technologies and applications, reviews their proven and potential benefits for education, bridges the gaps between AI technological innovations and their educational applications, and generates practical examples and inspirations for both technological experts that create AIEd technologies and educators who spearhead AI innovations in education. It also provides rich discussions on practical implications and future research directions from multiple perspectives. The advancement of AIEd calls for critical initiatives to address AI ethics and privacy concerns, and requires interdisciplinary and transdisciplinary collaborations in large-scaled, longitudinal research and development efforts.} 16 16 abstract = {From unique educational perspectives, this article reports a comprehensive review of selected empirical studies on artificial intelligence in education (AIEd) published in 1993–2020, as collected in the Web of Sciences database and selected AIEd-specialized journals. A total of 40 empirical studies met all selection criteria, and were fully reviewed using multiple methods, including selected bibliometrics, content analysis and categorical meta-trends analysis. 
This article reports the current state of AIEd research, highlights selected AIEd technologies and applications, reviews their proven and potential benefits for education, bridges the gaps between AI technological innovations and their educational applications, and generates practical examples and inspirations for both technological experts that create AIEd technologies and educators who spearhead AI innovations in education. It also provides rich discussions on practical implications and future research directions from multiple perspectives. The advancement of AIEd calls for critical initiatives to address AI ethics and privacy concerns, and requires interdisciplinary and transdisciplinary collaborations in large-scaled, longitudinal research and development efforts.}
} 17 17 }
18 18
@article{PETROVIC201617, 19 19 @article{PETROVIC201617,
title = {Knowledge-light adaptation approaches in case-based reasoning for radiotherapy treatment planning}, 20 20 title = {Knowledge-light adaptation approaches in case-based reasoning for radiotherapy treatment planning},
journal = {Artificial Intelligence in Medicine}, 21 21 journal = {Artificial Intelligence in Medicine},
volume = {68}, 22 22 volume = {68},
pages = {17-28}, 23 23 pages = {17-28},
year = {2016}, 24 24 year = {2016},
language = {English}, 25 25 language = {English},
issn = {0933-3657}, 26 26 issn = {0933-3657},
type = {article}, 27 27 type = {article},
doi = {10.1016/j.artmed.2016.01.006}, 28 28 doi = {10.1016/j.artmed.2016.01.006},
url = {https://www.sciencedirect.com/science/article/pii/S093336571630015X}, 29 29 url = {https://www.sciencedirect.com/science/article/pii/S093336571630015X},
author = {Sanja Petrovic and Gulmira Khussainova and Rupa Jagannathan}, 30 30 author = {Sanja Petrovic and Gulmira Khussainova and Rupa Jagannathan},
affiliation={Nottingham University}, 31 31 affiliation={Nottingham University},
address={UK}, 32 32 address={UK},
keywords = {Case-based reasoning, Adaptation-guided retrieval, Machine-learning tools, Radiotherapy treatment planning}, 33 33 keywords = {Case-based reasoning, Adaptation-guided retrieval, Machine-learning tools, Radiotherapy treatment planning},
abstract = {Objective 34 34 abstract = {Objective
Radiotherapy treatment planning aims at delivering a sufficient radiation dose to cancerous tumour cells while sparing healthy organs in the tumour-surrounding area. It is a time-consuming trial-and-error process that requires the expertise of a group of medical experts including oncologists and medical physicists and can take from 2 to 3h to a few days. Our objective is to improve the performance of our previously built case-based reasoning (CBR) system for brain tumour radiotherapy treatment planning. In this system, a treatment plan for a new patient is retrieved from a case base containing patient cases treated in the past and their treatment plans. However, this system does not perform any adaptation, which is needed to account for any difference between the new and retrieved cases. Generally, the adaptation phase is considered to be intrinsically knowledge-intensive and domain-dependent. Therefore, an adaptation often requires a large amount of domain-specific knowledge, which can be difficult to acquire and often is not readily available. In this study, we investigate approaches to adaptation that do not require much domain knowledge, referred to as knowledge-light adaptation. 35 35 Radiotherapy treatment planning aims at delivering a sufficient radiation dose to cancerous tumour cells while sparing healthy organs in the tumour-surrounding area. It is a time-consuming trial-and-error process that requires the expertise of a group of medical experts including oncologists and medical physicists and can take from 2 to 3h to a few days. Our objective is to improve the performance of our previously built case-based reasoning (CBR) system for brain tumour radiotherapy treatment planning. In this system, a treatment plan for a new patient is retrieved from a case base containing patient cases treated in the past and their treatment plans. 
However, this system does not perform any adaptation, which is needed to account for any difference between the new and retrieved cases. Generally, the adaptation phase is considered to be intrinsically knowledge-intensive and domain-dependent. Therefore, an adaptation often requires a large amount of domain-specific knowledge, which can be difficult to acquire and often is not readily available. In this study, we investigate approaches to adaptation that do not require much domain knowledge, referred to as knowledge-light adaptation.
Methodology 36 36 Methodology
We developed two adaptation approaches: adaptation based on machine-learning tools and adaptation-guided retrieval. They were used to adapt the beam number and beam angles suggested in the retrieved case. Two machine-learning tools, neural networks and naive Bayes classifier, were used in the adaptation to learn how the difference in attribute values between the retrieved and new cases affects the output of these two cases. The adaptation-guided retrieval takes into consideration not only the similarity between the new and retrieved cases, but also how to adapt the retrieved case. 37 37 We developed two adaptation approaches: adaptation based on machine-learning tools and adaptation-guided retrieval. They were used to adapt the beam number and beam angles suggested in the retrieved case. Two machine-learning tools, neural networks and naive Bayes classifier, were used in the adaptation to learn how the difference in attribute values between the retrieved and new cases affects the output of these two cases. The adaptation-guided retrieval takes into consideration not only the similarity between the new and retrieved cases, but also how to adapt the retrieved case.
Results 38 38 Results
The research was carried out in collaboration with medical physicists at the Nottingham University Hospitals NHS Trust, City Hospital Campus, UK. All experiments were performed using real-world brain cancer patient cases treated with three-dimensional (3D)-conformal radiotherapy. Neural networks-based adaptation improved the success rate of the CBR system with no adaptation by 12%. However, naive Bayes classifier did not improve the current retrieval results as it did not consider the interplay among attributes. The adaptation-guided retrieval of the case for beam number improved the success rate of the CBR system by 29%. However, it did not demonstrate good performance for the beam angle adaptation. Its success rate was 29% versus 39% when no adaptation was performed. 39 39 The research was carried out in collaboration with medical physicists at the Nottingham University Hospitals NHS Trust, City Hospital Campus, UK. All experiments were performed using real-world brain cancer patient cases treated with three-dimensional (3D)-conformal radiotherapy. Neural networks-based adaptation improved the success rate of the CBR system with no adaptation by 12%. However, naive Bayes classifier did not improve the current retrieval results as it did not consider the interplay among attributes. The adaptation-guided retrieval of the case for beam number improved the success rate of the CBR system by 29%. However, it did not demonstrate good performance for the beam angle adaptation. Its success rate was 29% versus 39% when no adaptation was performed.
Conclusions 40 40 Conclusions
The obtained empirical results demonstrate that the proposed adaptation methods improve the performance of the existing CBR system in recommending the number of beams to use. However, we also conclude that to be effective, the proposed adaptation of beam angles requires a large number of relevant cases in the case base.} 41 41 The obtained empirical results demonstrate that the proposed adaptation methods improve the performance of the existing CBR system in recommending the number of beams to use. However, we also conclude that to be effective, the proposed adaptation of beam angles requires a large number of relevant cases in the case base.}
} 42 42 }
43 43
@article{ROLDANREYES20151, 44 44 @article{ROLDANREYES20151,
title = {Improvement of online adaptation knowledge acquisition and reuse in case-based reasoning: Application to process engineering design}, 45 45 title = {Improvement of online adaptation knowledge acquisition and reuse in case-based reasoning: Application to process engineering design},
journal = {Engineering Applications of Artificial Intelligence}, 46 46 journal = {Engineering Applications of Artificial Intelligence},
volume = {41}, 47 47 volume = {41},
pages = {1-16}, 48 48 pages = {1-16},
affiliation={Université de Toulouse; Instituto Tecnologico de Orizaba}, 49 49 affiliation={Université de Toulouse; Instituto Tecnologico de Orizaba},
country={France}, 50 50 country={France},
language = {English}, 51 51 language = {English},
year = {2015}, 52 52 year = {2015},
type = {article}, 53 53 type = {article},
issn = {0952-1976}, 54 54 issn = {0952-1976},
doi = {10.1016/j.engappai.2015.01.015},
url = {https://www.sciencedirect.com/science/article/pii/S0952197615000263}, 56 56 url = {https://www.sciencedirect.com/science/article/pii/S0952197615000263},
author = {E. {Roldan Reyes} and S. Negny and G. {Cortes Robles} and J.M. {Le Lann}}, 57 57 author = {E. {Roldan Reyes} and S. Negny and G. {Cortes Robles} and J.M. {Le Lann}},
keywords = {Case based reasoning, Constraint satisfaction problems, Interactive adaptation method, Online knowledge acquisition, Failure diagnosis and repair}, 58 58 keywords = {Case based reasoning, Constraint satisfaction problems, Interactive adaptation method, Online knowledge acquisition, Failure diagnosis and repair},
abstract = {Despite various publications in the area during the last few years, the adaptation step is still a crucial phase for a relevant and reasonable Case Based Reasoning system. Furthermore, the online acquisition of the new adaptation knowledge is of particular interest as it enables the progressive improvement of the system while reducing the knowledge engineering effort without constraints for the expert. Therefore this paper presents a new interactive method for adaptation knowledge elicitation, acquisition and reuse, thanks to a modification of the traditional CBR cycle. Moreover to improve adaptation knowledge reuse, a test procedure is also implemented to help the user in the adaptation step and its diagnosis during adaptation failure. A study on the quality and usefulness of the new knowledge acquired is also driven. As our Knowledge Based Systems (KBS) is more focused on preliminary design, and more particularly in the field of process engineering, we need to unify in the same method two types of knowledge: contextual and general. To realize this, this article proposes the integration of the Constraint Satisfaction Problem (based on general knowledge) approach into the Case Based Reasoning (based on contextual knowledge) process to improve the case representation and the adaptation of past experiences. To highlight its capability, the proposed approach is illustrated through a case study dedicated to the design of an industrial mixing device.} 59 59 abstract = {Despite various publications in the area during the last few years, the adaptation step is still a crucial phase for a relevant and reasonable Case Based Reasoning system. Furthermore, the online acquisition of the new adaptation knowledge is of particular interest as it enables the progressive improvement of the system while reducing the knowledge engineering effort without constraints for the expert. 
Therefore this paper presents a new interactive method for adaptation knowledge elicitation, acquisition and reuse, thanks to a modification of the traditional CBR cycle. Moreover to improve adaptation knowledge reuse, a test procedure is also implemented to help the user in the adaptation step and its diagnosis during adaptation failure. A study on the quality and usefulness of the new knowledge acquired is also driven. As our Knowledge Based Systems (KBS) is more focused on preliminary design, and more particularly in the field of process engineering, we need to unify in the same method two types of knowledge: contextual and general. To realize this, this article proposes the integration of the Constraint Satisfaction Problem (based on general knowledge) approach into the Case Based Reasoning (based on contextual knowledge) process to improve the case representation and the adaptation of past experiences. To highlight its capability, the proposed approach is illustrated through a case study dedicated to the design of an industrial mixing device.}
} 60 60 }
61 61
@article{JUNG20095695, 62 62 @article{JUNG20095695,
title = {Integrating radial basis function networks with case-based reasoning for product design}, 63 63 title = {Integrating radial basis function networks with case-based reasoning for product design},
journal = {Expert Systems with Applications}, 64 64 journal = {Expert Systems with Applications},
volume = {36}, 65 65 volume = {36},
number = {3, Part 1}, 66 66 number = {3, Part 1},
language = {English}, 67 67 language = {English},
pages = {5695-5701}, 68 68 pages = {5695-5701},
year = {2009}, 69 69 year = {2009},
type = {article}, 70 70 type = {article},
issn = {0957-4174}, 71 71 issn = {0957-4174},
doi = {10.1016/j.eswa.2008.06.099},
url = {https://www.sciencedirect.com/science/article/pii/S0957417408003667}, 73 73 url = {https://www.sciencedirect.com/science/article/pii/S0957417408003667},
author = {Sabum Jung and Taesoo Lim and Dongsoo Kim}, 74 74 author = {Sabum Jung and Taesoo Lim and Dongsoo Kim},
affiliation={LG Production Engineering Research Institute; Sungkyul University; Soongsil University}, 75 75 affiliation={LG Production Engineering Research Institute; Sungkyul University; Soongsil University},
keywords = {Case-based reasoning (CBR), Radial basis function network (RBFN), Design expert system, Product design}, 76 76 keywords = {Case-based reasoning (CBR), Radial basis function network (RBFN), Design expert system, Product design},
abstract = {This paper presents a case-based design expert system that automatically determines the design values of a product. We focus on the design problem of a shadow mask which is a core component of monitors in the electronics industry. In case-based reasoning (CBR), it is important to retrieve similar cases and adapt them to meet design specifications exactly. Notably, difficulties in automating the adaptation process have prevented designers from being able to use design expert systems easily and efficiently. In this paper, we present a hybrid approach combining CBR and artificial neural networks in order to solve the problems occurring during the adaptation process. We first constructed a radial basis function network (RBFN) composed of representative cases created by K-means clustering. Then, the representative case most similar to the current problem was adjusted using the network. The rationale behind the proposed approach is discussed, and experimental results acquired from real shadow mask design are presented. Using the design expert system, designers can reduce design time and errors and enhance the total quality of design. Furthermore, the expert system facilitates effective sharing of design knowledge among designers.} 77 77 abstract = {This paper presents a case-based design expert system that automatically determines the design values of a product. We focus on the design problem of a shadow mask which is a core component of monitors in the electronics industry. In case-based reasoning (CBR), it is important to retrieve similar cases and adapt them to meet design specifications exactly. Notably, difficulties in automating the adaptation process have prevented designers from being able to use design expert systems easily and efficiently. In this paper, we present a hybrid approach combining CBR and artificial neural networks in order to solve the problems occurring during the adaptation process. 
We first constructed a radial basis function network (RBFN) composed of representative cases created by K-means clustering. Then, the representative case most similar to the current problem was adjusted using the network. The rationale behind the proposed approach is discussed, and experimental results acquired from real shadow mask design are presented. Using the design expert system, designers can reduce design time and errors and enhance the total quality of design. Furthermore, the expert system facilitates effective sharing of design knowledge among designers.}
} 78 78 }
79 79
@article{CHIU2023100118, 80 80 @article{CHIU2023100118,
title = {Systematic literature review on opportunities, challenges, and future research recommendations of artificial intelligence in education}, 81 81 title = {Systematic literature review on opportunities, challenges, and future research recommendations of artificial intelligence in education},
journal = {Computers and Education: Artificial Intelligence}, 82 82 journal = {Computers and Education: Artificial Intelligence},
volume = {4}, 83 83 volume = {4},
language = {English}, 84 84 language = {English},
type = {article}, 85 85 type = {article},
pages = {100118}, 86 86 pages = {100118},
year = {2023}, 87 87 year = {2023},
issn = {2666-920X}, 88 88 issn = {2666-920X},
doi = {10.1016/j.caeai.2022.100118},
url = {https://www.sciencedirect.com/science/article/pii/S2666920X2200073X}, 90 90 url = {https://www.sciencedirect.com/science/article/pii/S2666920X2200073X},
author = {Thomas K.F. Chiu and Qi Xia and Xinyan Zhou and Ching Sing Chai and Miaoting Cheng}, 91 91 author = {Thomas K.F. Chiu and Qi Xia and Xinyan Zhou and Ching Sing Chai and Miaoting Cheng},
keywords = {Artificial intelligence, Artificial intelligence in education, Systematic review, Learning, Teaching, Assessment}, 92 92 keywords = {Artificial intelligence, Artificial intelligence in education, Systematic review, Learning, Teaching, Assessment},
abstract = {Applications of artificial intelligence in education (AIEd) are emerging and are new to researchers and practitioners alike. Reviews of the relevant literature have not examined how AI technologies have been integrated into each of the four key educational domains of learning, teaching, assessment, and administration. The relationships between the technologies and learning outcomes for students and teachers have also been neglected. This systematic review study aims to understand the opportunities and challenges of AIEd by examining the literature from the last 10 years (2012–2021) using matrix coding and content analysis approaches. The results present the current focus of AIEd research by identifying 13 roles of AI technologies in the key educational domains, 7 learning outcomes of AIEd, and 10 major challenges. The review also provides suggestions for future directions of AIEd research.} 93 93 abstract = {Applications of artificial intelligence in education (AIEd) are emerging and are new to researchers and practitioners alike. Reviews of the relevant literature have not examined how AI technologies have been integrated into each of the four key educational domains of learning, teaching, assessment, and administration. The relationships between the technologies and learning outcomes for students and teachers have also been neglected. This systematic review study aims to understand the opportunities and challenges of AIEd by examining the literature from the last 10 years (2012–2021) using matrix coding and content analysis approaches. The results present the current focus of AIEd research by identifying 13 roles of AI technologies in the key educational domains, 7 learning outcomes of AIEd, and 10 major challenges. The review also provides suggestions for future directions of AIEd research.}
} 94 94 }
95 95
@article{Robertson2014ARO, 96 96 @article{Robertson2014ARO,
title = {A Review of Real-Time Strategy Game AI}, 97 97 title = {A Review of Real-Time Strategy Game AI},
author = {Glen Robertson and Ian D. Watson}, 98 98 author = {Glen Robertson and Ian D. Watson},
affiliation = {University of Auckland},
keywords = {Game, AI, Real-time strategy},
type={article}, 101 101 type={article},
language={English}, 102 102 language={English},
abstract = {This literature review covers AI techniques used for real-time strategy video games, focusing specifically on StarCraft. It finds that the main areas of current academic research are in tactical and strategic decision making, plan recognition, and learning, and it outlines the research contributions in each of these areas. The paper then contrasts the use of game AI in academe and industry, finding the academic research heavily focused on creating game-winning agents, while the industry aims to maximize player enjoyment. It finds that industry adoption of academic research is low because it is either inapplicable or too time-consuming and risky to implement in a new game, which highlights an area for potential investigation: bridging the gap between academe and industry. Finally, the areas of spatial reasoning, multiscale AI, and cooperation are found to require future work, and standardized evaluation methods are proposed to produce comparable results between studies.}, 103 103 abstract = {This literature review covers AI techniques used for real-time strategy video games, focusing specifically on StarCraft. It finds that the main areas of current academic research are in tactical and strategic decision making, plan recognition, and learning, and it outlines the research contributions in each of these areas. The paper then contrasts the use of game AI in academe and industry, finding the academic research heavily focused on creating game-winning agents, while the industry aims to maximize player enjoyment. It finds that industry adoption of academic research is low because it is either inapplicable or too time-consuming and risky to implement in a new game, which highlights an area for potential investigation: bridging the gap between academe and industry. 
Finally, the areas of spatial reasoning, multiscale AI, and cooperation are found to require future work, and standardized evaluation methods are proposed to produce comparable results between studies.},
journal = {AI Mag.}, 104 104 journal = {AI Mag.},
year = {2014}, 105 105 year = {2014},
volume = {35}, 106 106 volume = {35},
pages = {75-104} 107 107 pages = {75-104}
} 108 108 }
109 109
@Inproceedings{10.1007/978-3-642-15973-2_50, 110 110 @Inproceedings{10.1007/978-3-642-15973-2_50,
author={Butdee, S. 111 111 author={Butdee, S.
and Tichkiewitch, S.}, 112 112 and Tichkiewitch, S.},
affiliation={University of Technology North Bangkok; Grenoble Institute of Technology}, 113 113 affiliation={University of Technology North Bangkok; Grenoble Institute of Technology},
editor={Bernard, Alain}, 114 114 editor={Bernard, Alain},
title={Case-Based Reasoning for Adaptive Aluminum Extrusion Die Design Together with Parameters by Neural Networks}, 115 115 title={Case-Based Reasoning for Adaptive Aluminum Extrusion Die Design Together with Parameters by Neural Networks},
keywords={Adaptive die design and parameters, Optimal aluminum extrusion, Case-based reasoning, Neural networks}, 116 116 keywords={Adaptive die design and parameters, Optimal aluminum extrusion, Case-based reasoning, Neural networks},
booktitle={Global Product Development}, 117 117 booktitle={Global Product Development},
year={2011}, 118 118 year={2011},
type = {article; proceedings paper}, 119 119 type = {article; proceedings paper},
language = {English}, 120 120 language = {English},
publisher = {Springer Berlin Heidelberg}, 121 121 publisher = {Springer Berlin Heidelberg},
address = {Berlin, Heidelberg}, 122 122 address = {Berlin, Heidelberg},
pages = {491--496}, 123 123 pages = {491--496},
abstract = {Nowadays Aluminum extrusion die design is a critical task for improving productivity which involves with quality, time and cost. Case-Based Reasoning (CBR) method has been successfully applied to support the die design process in order to design a new die by tackling previous problems together with their solutions to match with a new similar problem. Such solutions are selected and modified to solve the present problem. However, the applications of the CBR are useful only retrieving previous features whereas the critical parameters are missing. In additions, the experience learning to such parameters are limited. This chapter proposes Artificial Neural Network (ANN) to associate the CBR in order to learning previous parameters and predict to the new die design according to the primitive die modification. The most satisfactory is to accommodate the optimal parameters of extrusion processes.}, 124 124 abstract = {Nowadays Aluminum extrusion die design is a critical task for improving productivity which involves with quality, time and cost. Case-Based Reasoning (CBR) method has been successfully applied to support the die design process in order to design a new die by tackling previous problems together with their solutions to match with a new similar problem. Such solutions are selected and modified to solve the present problem. However, the applications of the CBR are useful only retrieving previous features whereas the critical parameters are missing. In additions, the experience learning to such parameters are limited. This chapter proposes Artificial Neural Network (ANN) to associate the CBR in order to learning previous parameters and predict to the new die design according to the primitive die modification. The most satisfactory is to accommodate the optimal parameters of extrusion processes.},
isbn = {978-3-642-15973-2} 125 125 isbn = {978-3-642-15973-2}
} 126 126 }
127 127
@Inproceedings{10.1007/978-3-319-47096-2_11, 128 128 @Inproceedings{10.1007/978-3-319-47096-2_11,
author={Grace, Kazjon 129 129 author={Grace, Kazjon
and Maher, Mary Lou 130 130 and Maher, Mary Lou
and Wilson, David C. 131 131 and Wilson, David C.
and Najjar, Nadia A.}, 132 132 and Najjar, Nadia A.},
affiliation={University of North Carolina at Charlotte}, 133 133 affiliation={University of North Carolina at Charlotte},
editor={Goel, Ashok 134 134 editor={Goel, Ashok
and D{\'i}az-Agudo, M Bel{\'e}n 135 135 and D{\'i}az-Agudo, M Bel{\'e}n
and Roth-Berghofer, Thomas}, 136 136 and Roth-Berghofer, Thomas},
title={Combining CBR and Deep Learning to Generate Surprising Recipe Designs}, 137 137 title={Combining CBR and Deep Learning to Generate Surprising Recipe Designs},
keywords={Case-based reasoning, deep learning, recipe design}, 138 138 keywords={Case-based reasoning, deep learning, recipe design},
type = {article; proceedings paper}, 139 139 type = {article; proceedings paper},
booktitle={Case-Based Reasoning Research and Development}, 140 140 booktitle={Case-Based Reasoning Research and Development},
year={2016}, 141 141 year={2016},
publisher={Springer International Publishing}, 142 142 publisher={Springer International Publishing},
address={Cham}, 143 143 address={Cham},
language = {English}, 144 144 language = {English},
pages={154--169}, 145 145 pages={154--169},
abstract={This paper presents a dual-cycle CBR model in the domain of recipe generation. The model combines the strengths of deep learning and similarity-based retrieval to generate recipes that are novel and valuable (i.e. they are creative). The first cycle generates abstract descriptions which we call ``design concepts'' by synthesizing expectations from the entire case base, while the second cycle uses those concepts to retrieve and adapt objects. We define these conceptual object representations as an abstraction over complete cases on which expectations can be formed, allowing objects to be evaluated for surprisingness (the peak level of unexpectedness in the object, given the case base) and plausibility (the overall similarity of the object to those in the case base). The paper presents a prototype implementation of the model, and demonstrates its ability to generate objects that are simultaneously plausible and surprising, in addition to fitting a user query. This prototype is then compared to a traditional single-cycle CBR system.}, 146 146 abstract={This paper presents a dual-cycle CBR model in the domain of recipe generation. The model combines the strengths of deep learning and similarity-based retrieval to generate recipes that are novel and valuable (i.e. they are creative). The first cycle generates abstract descriptions which we call ``design concepts'' by synthesizing expectations from the entire case base, while the second cycle uses those concepts to retrieve and adapt objects. We define these conceptual object representations as an abstraction over complete cases on which expectations can be formed, allowing objects to be evaluated for surprisingness (the peak level of unexpectedness in the object, given the case base) and plausibility (the overall similarity of the object to those in the case base). 
The paper presents a prototype implementation of the model, and demonstrates its ability to generate objects that are simultaneously plausible and surprising, in addition to fitting a user query. This prototype is then compared to a traditional single-cycle CBR system.},
isbn={978-3-319-47096-2} 147 147 isbn={978-3-319-47096-2}
} 148 148 }
149 149
@Inproceedings{10.1007/978-3-319-61030-6_1, 150 150 @Inproceedings{10.1007/978-3-319-61030-6_1,
author={Maher, Mary Lou 151 151 author={Maher, Mary Lou
and Grace, Kazjon}, 152 152 and Grace, Kazjon},
editor={Aha, David W. 153 153 editor={Aha, David W.
and Lieber, Jean}, 154 154 and Lieber, Jean},
affiliation={University of North Carolina at Charlotte}, 155 155 affiliation={University of North Carolina at Charlotte},
title={Encouraging Curiosity in Case-Based Reasoning and Recommender Systems}, 156 156 title={Encouraging Curiosity in Case-Based Reasoning and Recommender Systems},
keywords={Curiosity, Case-based reasoning, Recommender systems}, 157 157 keywords={Curiosity, Case-based reasoning, Recommender systems},
booktitle={Case-Based Reasoning Research and Development}, 158 158 booktitle={Case-Based Reasoning Research and Development},
year={2017}, 159 159 year={2017},
publisher={Springer International Publishing}, 160 160 publisher={Springer International Publishing},
address={Cham}, 161 161 address={Cham},
pages={3--15}, 162 162 pages={3--15},
language = {English}, 163 163 language = {English},
type = {article; proceedings paper}, 164 164 type = {article; proceedings paper},
abstract={A key benefit of case-based reasoning (CBR) and recommender systems is the use of past experience to guide the synthesis or selection of the best solution for a specific context or user. Typically, the solution presented to the user is based on a value system that privileges the closest match in a query and the solution that performs best when evaluated according to predefined requirements. In domains in which creativity is desirable or the user is engaged in a learning activity, there is a benefit to moving beyond the expected or ``best match'' and include results based on computational models of novelty and surprise. In this paper, models of novelty and surprise are integrated with both CBR and Recommender Systems to encourage user curiosity.}, 165 165 abstract={A key benefit of case-based reasoning (CBR) and recommender systems is the use of past experience to guide the synthesis or selection of the best solution for a specific context or user. Typically, the solution presented to the user is based on a value system that privileges the closest match in a query and the solution that performs best when evaluated according to predefined requirements. In domains in which creativity is desirable or the user is engaged in a learning activity, there is a benefit to moving beyond the expected or ``best match'' and include results based on computational models of novelty and surprise. In this paper, models of novelty and surprise are integrated with both CBR and Recommender Systems to encourage user curiosity.},
isbn={978-3-319-61030-6} 166 166 isbn={978-3-319-61030-6}
} 167 167 }
168 168
@Inproceedings{Muller, 169 169 @Inproceedings{Muller,
author = {Müller, G. and Bergmann, R.}, 170 170 author = {Müller, G. and Bergmann, R.},
affiliation={University of Trier}, 171 171 affiliation={University of Trier},
year = {2015}, 172 172 year = {2015},
month = {01}, 173 173 month = {01},
language = {English}, 174 174 language = {English},
type = {article; proceedings paper}, 175 175 type = {article; proceedings paper},
abstract = {This paper presents CookingCAKE, a framework for the adaptation of cooking recipes represented as workflows. CookingCAKE integrates and combines several workflow adaptation approaches applied in process-oriented case based reasoning (POCBR) in a single adaptation framework, thus providing a capable tool for the adaptation of cooking recipes. The available case base of cooking workflows is analyzed to generate adaptation knowledge which is used to adapt a recipe regarding restrictions and resources, which the user may define for the preparation of a dish.},
booktitle = {International Conference on Case-Based Reasoning}, 177 177 booktitle = {International Conference on Case-Based Reasoning},
title = {CookingCAKE: A Framework for the adaptation of cooking recipes represented as workflows}, 178 178 title = {CookingCAKE: A Framework for the adaptation of cooking recipes represented as workflows},
keywords={recipe adaptation, workflow adaptation, workflows, process-oriented, case based reasoning} 179 179 keywords={recipe adaptation, workflow adaptation, workflows, process-oriented, case based reasoning}
} 180 180 }
181 181
@Inproceedings{10.1007/978-3-319-24586-7_20, 182 182 @Inproceedings{10.1007/978-3-319-24586-7_20,
author={Onta{\~{n}}{\'o}n, S. 183 183 author={Onta{\~{n}}{\'o}n, S.
and Plaza, E. 184 184 and Plaza, E.
and Zhu, J.}, 185 185 and Zhu, J.},
editor={H{\"u}llermeier, Eyke 186 186 editor={H{\"u}llermeier, Eyke
and Minor, Mirjam}, 187 187 and Minor, Mirjam},
affiliation={Drexel University; Artificial Intelligence Research Institute CSIC}, 188 188 affiliation={Drexel University; Artificial Intelligence Research Institute CSIC},
title={Argument-Based Case Revision in CBR for Story Generation}, 189 189 title={Argument-Based Case Revision in CBR for Story Generation},
keywords={CBR, Case-based reasoning, Story generation}, 190 190 keywords={CBR, Case-based reasoning, Story generation},
booktitle={Case-Based Reasoning Research and Development}, 191 191 booktitle={Case-Based Reasoning Research and Development},
year={2015}, 192 192 year={2015},
publisher={Springer International Publishing}, 193 193 publisher={Springer International Publishing},
address={Cham}, 194 194 address={Cham},
language = {English}, 195 195 language = {English},
pages={290--305}, 196 196 pages={290--305},
type = {article; proceedings paper}, 197 197 type = {article; proceedings paper},
abstract={This paper presents a new approach to case revision in case-based reasoning based on the idea of argumentation. Previous work on case reuse has proposed the use of operations such as case amalgamation (or merging), which generate solutions by combining information coming from different cases. Such approaches are often based on exploring the search space of possible combinations looking for a solution that maximizes a certain criteria. We show how Revise can be performed by arguments attacking specific parts of a case produced by Reuse, and how they can guide and prevent repeating pitfalls in future cases. The proposed approach is evaluated in the task of automatic story generation.}, 198 198 abstract={This paper presents a new approach to case revision in case-based reasoning based on the idea of argumentation. Previous work on case reuse has proposed the use of operations such as case amalgamation (or merging), which generate solutions by combining information coming from different cases. Such approaches are often based on exploring the search space of possible combinations looking for a solution that maximizes a certain criteria. We show how Revise can be performed by arguments attacking specific parts of a case produced by Reuse, and how they can guide and prevent repeating pitfalls in future cases. The proposed approach is evaluated in the task of automatic story generation.},
isbn={978-3-319-24586-7} 199 199 isbn={978-3-319-24586-7}
} 200 200 }
201 201
@Inproceedings{10.1007/978-3-030-58342-2_20, 202 202 @Inproceedings{10.1007/978-3-030-58342-2_20,
author={Lepage, Yves 203 203 author={Lepage, Yves
and Lieber, Jean 204 204 and Lieber, Jean
and Mornard, Isabelle 205 205 and Mornard, Isabelle
and Nauer, Emmanuel 206 206 and Nauer, Emmanuel
and Romary, Julien 207 207 and Romary, Julien
and Sies, Reynault}, 208 208 and Sies, Reynault},
editor={Watson, Ian 209 209 editor={Watson, Ian
and Weber, Rosina}, 210 210 and Weber, Rosina},
title={The French Correction: When Retrieval Is Harder to Specify than Adaptation}, 211 211 title={The French Correction: When Retrieval Is Harder to Specify than Adaptation},
affiliation={Waseda University; Université de Lorraine}, 212 212 affiliation={Waseda University; Université de Lorraine},
keywords={case-based reasoning, retrieval, analogy, sentence correction}, 213 213 keywords={case-based reasoning, retrieval, analogy, sentence correction},
booktitle={Case-Based Reasoning Research and Development}, 214 214 booktitle={Case-Based Reasoning Research and Development},
year={2020}, 215 215 year={2020},
language = {English}, 216 216 language = {English},
type = {article; proceedings paper}, 217 217 type = {article; proceedings paper},
publisher={Springer International Publishing}, 218 218 publisher={Springer International Publishing},
address={Cham}, 219 219 address={Cham},
pages={309--324}, 220 220 pages={309--324},
abstract={A common idea in the field of case-based reasoning is that the retrieval step can be specified by the use of some similarity measure: the retrieved cases maximize the similarity to the target problem and, then, the adaptation step has to take into account the mismatches between the retrieved cases and the target problem in order to this latter. The use of this methodological schema for the application described in this paper has proven to be non efficient. Indeed, designing a retrieval procedure without the precise knowledge of the adaptation procedure has not been possible. The domain of this application is the correction of French sentences: a problem is an incorrect sentence and a valid solution is a correction of this problem. Adaptation consists in solving an analogical equation that enables to execute the correction of the retrieved case on the target problem. Thus, retrieval has to ensure that this application is feasible. The first version of such a retrieval procedure is described and evaluated: it is a knowledge-light procedure that does not use linguistic knowledge about French.}, 221 221 abstract={A common idea in the field of case-based reasoning is that the retrieval step can be specified by the use of some similarity measure: the retrieved cases maximize the similarity to the target problem and, then, the adaptation step has to take into account the mismatches between the retrieved cases and the target problem in order to this latter. The use of this methodological schema for the application described in this paper has proven to be non efficient. Indeed, designing a retrieval procedure without the precise knowledge of the adaptation procedure has not been possible. The domain of this application is the correction of French sentences: a problem is an incorrect sentence and a valid solution is a correction of this problem. 
Adaptation consists in solving an analogical equation that enables to execute the correction of the retrieved case on the target problem. Thus, retrieval has to ensure that this application is feasible. The first version of such a retrieval procedure is described and evaluated: it is a knowledge-light procedure that does not use linguistic knowledge about French.},
isbn={978-3-030-58342-2} 222 222 isbn={978-3-030-58342-2}
} 223 223 }
224 224
@Inproceedings{10.1007/978-3-030-01081-2_25, 225 225 @Inproceedings{10.1007/978-3-030-01081-2_25,
author={Smyth, Barry 226 226 author={Smyth, Barry
and Cunningham, P{\'a}draig}, 227 227 and Cunningham, P{\'a}draig},
editor={Cox, Michael T. 228 228 editor={Cox, Michael T.
and Funk, Peter 229 229 and Funk, Peter
and Begum, Shahina}, 230 230 and Begum, Shahina},
affiliation={University College Dublin}, 231 231 affiliation={University College Dublin},
title={An Analysis of Case Representations for Marathon Race Prediction and Planning}, 232 232 title={An Analysis of Case Representations for Marathon Race Prediction and Planning},
keywords={Marathon planning, Case representation, Case-based reasoning}, 233 233 keywords={Marathon planning, Case representation, Case-based reasoning},
booktitle={Case-Based Reasoning Research and Development}, 234 234 booktitle={Case-Based Reasoning Research and Development},
year={2018}, 235 235 year={2018},
language = {English}, 236 236 language = {English},
publisher={Springer International Publishing}, 237 237 publisher={Springer International Publishing},
address={Cham}, 238 238 address={Cham},
pages={369--384}, 239 239 pages={369--384},
type = {article; proceedings paper}, 240 240 type = {article; proceedings paper},
abstract={We use case-based reasoning to help marathoners achieve a personal best for an upcoming race, by helping them to select an achievable goal-time and a suitable pacing plan. We evaluate several case representations and, using real-world race data, highlight their performance implications. Richer representations do not always deliver better prediction performance, but certain representational configurations do offer very significant practical benefits for runners, when it comes to predicting, and planning for, challenging goal-times during an upcoming race.}, 241 241 abstract={We use case-based reasoning to help marathoners achieve a personal best for an upcoming race, by helping them to select an achievable goal-time and a suitable pacing plan. We evaluate several case representations and, using real-world race data, highlight their performance implications. Richer representations do not always deliver better prediction performance, but certain representational configurations do offer very significant practical benefits for runners, when it comes to predicting, and planning for, challenging goal-times during an upcoming race.},
isbn={978-3-030-01081-2} 242 242 isbn={978-3-030-01081-2}
} 243 243 }
244 244
@Inproceedings{10.1007/978-3-030-58342-2_8, 245 245 @Inproceedings{10.1007/978-3-030-58342-2_8,
author={Smyth, Barry 246 246 author={Smyth, Barry
and Willemsen, Martijn C.}, 247 247 and Willemsen, Martijn C.},
editor={Watson, Ian 248 248 editor={Watson, Ian
and Weber, Rosina}, 249 249 and Weber, Rosina},
affiliation={University College Dublin; Eindhoven University of Technology}, 250 250 affiliation={University College Dublin; Eindhoven University of Technology},
title={Predicting the Personal-Best Times of Speed Skaters Using Case-Based Reasoning}, 251 251 title={Predicting the Personal-Best Times of Speed Skaters Using Case-Based Reasoning},
keywords={CBR for health and exercise, speed skating, race-time prediction, case representation}, 252 252 keywords={CBR for health and exercise, speed skating, race-time prediction, case representation},
booktitle={Case-Based Reasoning Research and Development}, 253 253 booktitle={Case-Based Reasoning Research and Development},
year={2020}, 254 254 year={2020},
type = {article; proceedings paper}, 255 255 type = {article; proceedings paper},
language = {English}, 256 256 language = {English},
publisher={Springer International Publishing}, 257 257 publisher={Springer International Publishing},
address={Cham}, 258 258 address={Cham},
pages={112--126}, 259 259 pages={112--126},
abstract={Speed skating is a form of ice skating in which the skaters race each other over a variety of standardised distances. Races take place on specialised ice-rinks and the type of track and ice conditions can have a significant impact on race-times. As race distances increase, pacing also plays an important role. In this paper we seek to extend recent work on the application of case-based reasoning to marathon-time prediction by predicting race-times for speed skaters. In particular, we propose and evaluate a number of case-based reasoning variants based on different case and feature representations to generate track-specific race predictions. We show it is possible to improve upon state-of-the-art prediction accuracy by harnessing richer case representations using shorter races and track-adjusted finish and lap-times.}, 260 260 abstract={Speed skating is a form of ice skating in which the skaters race each other over a variety of standardised distances. Races take place on specialised ice-rinks and the type of track and ice conditions can have a significant impact on race-times. As race distances increase, pacing also plays an important role. In this paper we seek to extend recent work on the application of case-based reasoning to marathon-time prediction by predicting race-times for speed skaters. In particular, we propose and evaluate a number of case-based reasoning variants based on different case and feature representations to generate track-specific race predictions. We show it is possible to improve upon state-of-the-art prediction accuracy by harnessing richer case representations using shorter races and track-adjusted finish and lap-times.},
isbn={978-3-030-58342-2} 261 261 isbn={978-3-030-58342-2}
} 262 262 }
263 263
@Inproceedings{10.1007/978-3-030-58342-2_5, 264 264 @Inproceedings{10.1007/978-3-030-58342-2_5,
author={Feely, Ciara 265 265 author={Feely, Ciara
and Caulfield, Brian 266 266 and Caulfield, Brian
and Lawlor, Aonghus 267 267 and Lawlor, Aonghus
and Smyth, Barry}, 268 268 and Smyth, Barry},
editor={Watson, Ian 269 269 editor={Watson, Ian
and Weber, Rosina}, 270 270 and Weber, Rosina},
affiliation={University College Dublin}, 271 271 affiliation={University College Dublin},
title={Using Case-Based Reasoning to Predict Marathon Performance and Recommend Tailored Training Plans}, 272 272 title={Using Case-Based Reasoning to Predict Marathon Performance and Recommend Tailored Training Plans},
keywords={CBR for health and exercise, marathon running, race-time prediction, plan recommendation}, 273 273 keywords={CBR for health and exercise, marathon running, race-time prediction, plan recommendation},
booktitle={Case-Based Reasoning Research and Development}, 274 274 booktitle={Case-Based Reasoning Research and Development},
year={2020}, 275 275 year={2020},
language = {English}, 276 276 language = {English},
publisher={Springer International Publishing}, 277 277 publisher={Springer International Publishing},
address={Cham}, 278 278 address={Cham},
pages={67--81}, 279 279 pages={67--81},
type = {article; proceedings paper}, 280 280 type = {article; proceedings paper},
abstract={Training for the marathon, especially a first marathon, is always a challenge. Many runners struggle to find the right balance between their workouts and their recovery, often leading to sub-optimal performance on race-day or even injury during training. We describe and evaluate a novel case-based reasoning system to help marathon runners as they train in two ways. First, it uses a case-base of training/workouts and race histories to predict future marathon times for a target runner, throughout their training program, helping runners to calibrate their progress and, ultimately, plan their race-day pacing. Second, the system recommends tailored training plans to runners, adapted for their current goal-time target, and based on the training plans of similar runners who have achieved this time. We evaluate the system using a dataset of more than 21,000 unique runners and 1.5 million training/workout sessions.}, 281 281 abstract={Training for the marathon, especially a first marathon, is always a challenge. Many runners struggle to find the right balance between their workouts and their recovery, often leading to sub-optimal performance on race-day or even injury during training. We describe and evaluate a novel case-based reasoning system to help marathon runners as they train in two ways. First, it uses a case-base of training/workouts and race histories to predict future marathon times for a target runner, throughout their training program, helping runners to calibrate their progress and, ultimately, plan their race-day pacing. Second, the system recommends tailored training plans to runners, adapted for their current goal-time target, and based on the training plans of similar runners who have achieved this time. We evaluate the system using a dataset of more than 21,000 unique runners and 1.5 million training/workout sessions.},
isbn={978-3-030-58342-2} 282 282 isbn={978-3-030-58342-2}
} 283 283 }
284 284
@article{LALITHA2020583, 285 285 @article{LALITHA2020583,
title = {Personalised Self-Directed Learning Recommendation System}, 286 286 title = {Personalised Self-Directed Learning Recommendation System},
journal = {Procedia Computer Science}, 287 287 journal = {Procedia Computer Science},
volume = {171}, 288 288 volume = {171},
pages = {583-592}, 289 289 pages = {583-592},
year = {2020}, 290 290 year = {2020},
type = {article}, 291 291 type = {article},
language = {English}, 292 292 language = {English},
note = {Third International Conference on Computing and Network Communications (CoCoNet'19)}, 293 293 note = {Third International Conference on Computing and Network Communications (CoCoNet'19)},
issn = {1877-0509}, 294 294 issn = {1877-0509},
doi = {10.1016/j.procs.2020.04.063},
url = {https://www.sciencedirect.com/science/article/pii/S1877050920310309}, 296 296 url = {https://www.sciencedirect.com/science/article/pii/S1877050920310309},
author = {T B Lalitha and P S Sreeja}, 297 297 author = {T B Lalitha and P S Sreeja},
affiliation={Hindustan Institute of Technology and Science}, 298 298 affiliation={Hindustan Institute of Technology and Science},
keywords = {e-Learning, PSDLR, Recommendation System, SDL, Self-Directed Learning}, 299 299 keywords = {e-Learning, PSDLR, Recommendation System, SDL, Self-Directed Learning},
abstract = {Modern educational systems have changed drastically bringing in knowledge anywhere as needed by the learner with the evolution of Internet. Availability of knowledge in public domain, capability of exchanging large amount of information and filtering relevant information quickly has enabled disruption to conventional educational system. Thus, future trends are looking towards E-Learning (Electronic Learning) and M-Learning (Mobile Learning) technologies over the Internet for their vast knowledge acquisition. In this paper, the work gives an elaborate context of learning strategies prevailing and emerging with the classification of e-learning Techniques. It majorly focuses on the features and variety of aspects with the e-learning and the choice of learning method involved and facilitate the adoption of new ways for personalized selection on learning resources for SDL (Self-Directed Learning) from the unstructured, large web-based environment. Thereby, proposes a Personalised Self-Directed Learning Recommendation System (PSDLR) based on the personal specifications of the SDL learner. The result offers insight into the perspectives and challenges of Self-Directed Learning based on cognitive and constructive characteristics which majorly incorporates web-based learning and gives path in finding appropriate solutions using machine learning techniques and ontology for the open problems in the respective fields with personalised recommendations and guidelines for future research.} 300 300 abstract = {Modern educational systems have changed drastically bringing in knowledge anywhere as needed by the learner with the evolution of Internet. Availability of knowledge in public domain, capability of exchanging large amount of information and filtering relevant information quickly has enabled disruption to conventional educational system. 
Thus, future trends are looking towards E-Learning (Electronic Learning) and M-Learning (Mobile Learning) technologies over the Internet for their vast knowledge acquisition. In this paper, the work gives an elaborate context of learning strategies prevailing and emerging with the classification of e-learning Techniques. It majorly focuses on the features and variety of aspects with the e-learning and the choice of learning method involved and facilitate the adoption of new ways for personalized selection on learning resources for SDL (Self-Directed Learning) from the unstructured, large web-based environment. Thereby, proposes a Personalised Self-Directed Learning Recommendation System (PSDLR) based on the personal specifications of the SDL learner. The result offers insight into the perspectives and challenges of Self-Directed Learning based on cognitive and constructive characteristics which majorly incorporates web-based learning and gives path in finding appropriate solutions using machine learning techniques and ontology for the open problems in the respective fields with personalised recommendations and guidelines for future research.}
} 301 301 }
302 302
@article{Zhou2021, 303 303 @article{Zhou2021,
author={Zhou, Lina 304 304 author={Zhou, Lina
and Wang, Chunxia}, 305 305 and Wang, Chunxia},
affiliation={Baotou Medical College}, 306 306 affiliation={Baotou Medical College},
title={Research on Recommendation of Personalized Exercises in English Learning Based on Data Mining}, 307 307 title={Research on Recommendation of Personalized Exercises in English Learning Based on Data Mining},
journal={Scientific Programming}, 308 308 journal={Scientific Programming},
year={2021}, 309 309 year={2021},
month={Dec}, 310 310 month={Dec},
type = {article}, 311 311 type = {article},
language = {English}, 312 312 language = {English},
day={21}, 313 313 day={21},
publisher={Hindawi}, 314 314 publisher={Hindawi},
keywords={Recommender systems, Learning}, 315 315 keywords={Recommender systems, Learning},
volume={2021}, 316 316 volume={2021},
pages={5042286}, 317 317 pages={5042286},
abstract={Aiming at the problems of traditional method of exercise recommendation precision, recall rate, long recommendation time, and poor recommendation comprehensiveness, this study proposes a personalized exercise recommendation method for English learning based on data mining. Firstly, a personalized recommendation model is designed, based on the model to preprocess the data in the Web access log, and cleaning the noise data to avoid its impact on the accuracy of the recommendation results is focused; secondly, the DINA model to diagnose the degree of mastery of students{\&}{\#}x2019; knowledge points is used and the students{\&}{\#}x2019; browsing patterns through fuzzy similar relationships are clustered; and finally, according to the clustering results, the similarity between students and the similarity between exercises are measured, and the collaborative filtering recommendation of personalized exercises for English learning is realized. The experimental results show that the exercise recommendation precision and recall rate of this method are higher, the recommendation time is shorter, and the recommendation results are comprehensive.}, 318 318 abstract={Aiming at the problems of traditional method of exercise recommendation precision, recall rate, long recommendation time, and poor recommendation comprehensiveness, this study proposes a personalized exercise recommendation method for English learning based on data mining. 
Firstly, a personalized recommendation model is designed, based on the model to preprocess the data in the Web access log, and cleaning the noise data to avoid its impact on the accuracy of the recommendation results is focused; secondly, the DINA model to diagnose the degree of mastery of students' knowledge points is used and the students' browsing patterns through fuzzy similar relationships are clustered; and finally, according to the clustering results, the similarity between students and the similarity between exercises are measured, and the collaborative filtering recommendation of personalized exercises for English learning is realized. The experimental results show that the exercise recommendation precision and recall rate of this method are higher, the recommendation time is shorter, and the recommendation results are comprehensive.},
issn={1058-9244}, 319 319 issn={1058-9244},
doi={10.1155/2021/5042286}, 320 320 doi={10.1155/2021/5042286},
url={https://doi.org/10.1155/2021/5042286} 321 321 url={https://doi.org/10.1155/2021/5042286}
} 322 322 }
323 323
@article{INGKAVARA2022100086, 324 324 @article{INGKAVARA2022100086,
title = {The use of a personalized learning approach to implementing self-regulated online learning}, 325 325 title = {The use of a personalized learning approach to implementing self-regulated online learning},
journal = {Computers and Education: Artificial Intelligence}, 326 326 journal = {Computers and Education: Artificial Intelligence},
volume = {3}, 327 327 volume = {3},
pages = {100086}, 328 328 pages = {100086},
type = {article}, 329 329 type = {article},
language = {English}, 330 330 language = {English},
year = {2022}, 331 331 year = {2022},
issn = {2666-920X}, 332 332 issn = {2666-920X},
doi = {10.1016/j.caeai.2022.100086},
url = {https://www.sciencedirect.com/science/article/pii/S2666920X22000418}, 334 334 url = {https://www.sciencedirect.com/science/article/pii/S2666920X22000418},
author = {Thanyaluck Ingkavara and Patcharin Panjaburee and Niwat Srisawasdi and Suthiporn Sajjapanroj}, 335 335 author = {Thanyaluck Ingkavara and Patcharin Panjaburee and Niwat Srisawasdi and Suthiporn Sajjapanroj},
keywords = {Intelligent tutoring system, Personalization, Adaptive learning, E-learning, TAM, Artificial intelligence}, 336 336 keywords = {Intelligent tutoring system, Personalization, Adaptive learning, E-learning, TAM, Artificial intelligence},
abstract = {Nowadays, students are encouraged to learn via online learning systems to promote students' autonomy. Scholars have found that students' self-regulated actions impact their academic success in an online learning environment. However, because traditional online learning systems cannot personalize feedback to the student's personality, most students have less chance to obtain helpful suggestions for enhancing their knowledge linked to their learning problems. This paper incorporated self-regulated online learning in the Physics classroom and used a personalized learning approach to help students receive proper learning paths and material corresponding to their learning preferences. This study conducted a quasi-experimental design using a quantitative approach to evaluate the effectiveness of the proposed learning environment in secondary schools. The experimental group of students participated in self-regulated online learning with a personalized learning approach, while the control group participated in conventional self-regulated online learning. The experimental results showed that the experimental group's post-test and the learning-gain score of the experimental group were significantly higher than those of the control group. Moreover, the results also suggested that the student's perceptions about the usefulness of learning suggestions, ease of use, goal setting, learning environmental structuring, task strategies, time management, self-evaluation, impact on learning, and attitude toward the learning environment are important predictors of behavioral intention to learn with the self-regulated online learning that integrated with the personalized learning approach.} 337 337 abstract = {Nowadays, students are encouraged to learn via online learning systems to promote students' autonomy. Scholars have found that students' self-regulated actions impact their academic success in an online learning environment. 
However, because traditional online learning systems cannot personalize feedback to the student's personality, most students have less chance to obtain helpful suggestions for enhancing their knowledge linked to their learning problems. This paper incorporated self-regulated online learning in the Physics classroom and used a personalized learning approach to help students receive proper learning paths and material corresponding to their learning preferences. This study conducted a quasi-experimental design using a quantitative approach to evaluate the effectiveness of the proposed learning environment in secondary schools. The experimental group of students participated in self-regulated online learning with a personalized learning approach, while the control group participated in conventional self-regulated online learning. The experimental results showed that the experimental group's post-test and the learning-gain score of the experimental group were significantly higher than those of the control group. Moreover, the results also suggested that the student's perceptions about the usefulness of learning suggestions, ease of use, goal setting, learning environmental structuring, task strategies, time management, self-evaluation, impact on learning, and attitude toward the learning environment are important predictors of behavioral intention to learn with the self-regulated online learning that integrated with the personalized learning approach.}
} 338 338 }
339 339
@article{HUANG2023104684, 340 340 @article{HUANG2023104684,
title = {Effects of artificial Intelligence–Enabled personalized recommendations on learners’ learning engagement, motivation, and outcomes in a flipped classroom}, 341 341 title = {Effects of artificial Intelligence–Enabled personalized recommendations on learners’ learning engagement, motivation, and outcomes in a flipped classroom},
journal = {Computers and Education}, 342 342 journal = {Computers and Education},
volume = {194}, 343 343 volume = {194},
pages = {104684}, 344 344 pages = {104684},
year = {2023}, 345 345 year = {2023},
language = {English}, 346 346 language = {English},
type = {article}, 347 347 type = {article},
issn = {0360-1315}, 348 348 issn = {0360-1315},
doi = {10.1016/j.compedu.2022.104684},
url = {https://www.sciencedirect.com/science/article/pii/S036013152200255X}, 350 350 url = {https://www.sciencedirect.com/science/article/pii/S036013152200255X},
author = {Anna Y.Q. Huang and Owen H.T. Lu and Stephen J.H. Yang}, 351 351 author = {Anna Y.Q. Huang and Owen H.T. Lu and Stephen J.H. Yang},
keywords = {Data science applications in education, Distance education and online learning, Improving classroom teaching}, 352 352 keywords = {Data science applications in education, Distance education and online learning, Improving classroom teaching},
abstract = {The flipped classroom approach is aimed at improving learning outcomes by promoting learning motivation and engagement. Recommendation systems can also be used to improve learning outcomes. With the rapid development of artificial intelligence (AI) technology, various systems have been developed to facilitate student learning. Accordingly, we applied AI-enabled personalized video recommendations to stimulate students' learning motivation and engagement during a systems programming course in a flipped classroom setting. We assigned students to control and experimental groups comprising 59 and 43 college students, respectively. The students in both groups received flipped classroom instruction, but only those in the experimental group received AI-enabled personalized video recommendations. We quantitatively measured students’ engagement based on their learning profiles in a learning management system. The results revealed that the AI-enabled personalized video recommendations could significantly improve the learning performance and engagement of students with a moderate motivation level.} 353 353 abstract = {The flipped classroom approach is aimed at improving learning outcomes by promoting learning motivation and engagement. Recommendation systems can also be used to improve learning outcomes. With the rapid development of artificial intelligence (AI) technology, various systems have been developed to facilitate student learning. Accordingly, we applied AI-enabled personalized video recommendations to stimulate students' learning motivation and engagement during a systems programming course in a flipped classroom setting. We assigned students to control and experimental groups comprising 59 and 43 college students, respectively. The students in both groups received flipped classroom instruction, but only those in the experimental group received AI-enabled personalized video recommendations. 
We quantitatively measured students’ engagement based on their learning profiles in a learning management system. The results revealed that the AI-enabled personalized video recommendations could significantly improve the learning performance and engagement of students with a moderate motivation level.}
} 354 354 }
355 355
@article{ZHAO2023118535, 356 356 @article{ZHAO2023118535,
title = {A recommendation system for effective learning strategies: An integrated approach using context-dependent DEA}, 357 357 title = {A recommendation system for effective learning strategies: An integrated approach using context-dependent DEA},
journal = {Expert Systems with Applications}, 358 358 journal = {Expert Systems with Applications},
volume = {211}, 359 359 volume = {211},
pages = {118535}, 360 360 pages = {118535},
year = {2023}, 361 361 year = {2023},
language = {English}, 362 362 language = {English},
type = {article}, 363 363 type = {article},
issn = {0957-4174}, 364 364 issn = {0957-4174},
doi = {10.1016/j.eswa.2022.118535},
url = {https://www.sciencedirect.com/science/article/pii/S0957417422016104}, 366 366 url = {https://www.sciencedirect.com/science/article/pii/S0957417422016104},
author = {Lu-Tao Zhao and Dai-Song Wang and Feng-Yun Liang and Jian Chen}, 367 367 author = {Lu-Tao Zhao and Dai-Song Wang and Feng-Yun Liang and Jian Chen},
keywords = {Recommendation system, Learning strategies, Context-dependent DEA, Efficiency analysis}, 368 368 keywords = {Recommendation system, Learning strategies, Context-dependent DEA, Efficiency analysis},
abstract = {Universities have been focusing on increasing individualized training and providing appropriate education for students. The individual differences and learning needs of college students should be given enough attention. From the perspective of learning efficiency, we establish a clustering hierarchical progressive improvement model (CHPI), which is based on cluster analysis and context-dependent data envelopment analysis (DEA) methods. The CHPI clusters students' ontological features, employs the context-dependent DEA method to stratify students of different classes, and calculates measures, such as obstacles, to determine the reference path for individuals with inefficient learning processes. The learning strategies are determined according to the gap between the inefficient individual to be improved and the individuals on the reference path. By the study of college English courses as an example, it is found that the CHPI can accurately recommend targeted learning strategies to satisfy the individual needs of college students so that the learning of individuals with inefficient learning processes in a certain stage can be effectively improved. In addition, CHPI can provide specific, efficient suggestions to improve learning efficiency comparing to existing recommendation systems, and has great potential in promoting the integration of education-related researches and expert systems.} 369 369 abstract = {Universities have been focusing on increasing individualized training and providing appropriate education for students. The individual differences and learning needs of college students should be given enough attention. From the perspective of learning efficiency, we establish a clustering hierarchical progressive improvement model (CHPI), which is based on cluster analysis and context-dependent data envelopment analysis (DEA) methods. 
The CHPI clusters students' ontological features, employs the context-dependent DEA method to stratify students of different classes, and calculates measures, such as obstacles, to determine the reference path for individuals with inefficient learning processes. The learning strategies are determined according to the gap between the inefficient individual to be improved and the individuals on the reference path. By the study of college English courses as an example, it is found that the CHPI can accurately recommend targeted learning strategies to satisfy the individual needs of college students so that the learning of individuals with inefficient learning processes in a certain stage can be effectively improved. In addition, CHPI can provide specific, efficient suggestions to improve learning efficiency comparing to existing recommendation systems, and has great potential in promoting the integration of education-related researches and expert systems.}
} 370 370 }
371 371
@article{SU2022109547, 372 372 @article{SU2022109547,
title = {Graph-based cognitive diagnosis for intelligent tutoring systems}, 373 373 title = {Graph-based cognitive diagnosis for intelligent tutoring systems},
journal = {Knowledge-Based Systems}, 374 374 journal = {Knowledge-Based Systems},
volume = {253}, 375 375 volume = {253},
pages = {109547}, 376 376 pages = {109547},
year = {2022}, 377 377 year = {2022},
language = {English}, 378 378 language = {English},
type = {article}, 379 379 type = {article},
issn = {0950-7051}, 380 380 issn = {0950-7051},
doi = {10.1016/j.knosys.2022.109547},
url = {https://www.sciencedirect.com/science/article/pii/S095070512200778X}, 382 382 url = {https://www.sciencedirect.com/science/article/pii/S095070512200778X},
author = {Yu Su and Zeyu Cheng and Jinze Wu and Yanmin Dong and Zhenya Huang and Le Wu and Enhong Chen and Shijin Wang and Fei Xie}, 383 383 author = {Yu Su and Zeyu Cheng and Jinze Wu and Yanmin Dong and Zhenya Huang and Le Wu and Enhong Chen and Shijin Wang and Fei Xie},
keywords = {Cognitive diagnosis, Graph neural networks, Interpretable machine learning}, 384 384 keywords = {Cognitive diagnosis, Graph neural networks, Interpretable machine learning},
abstract = {For intelligent tutoring systems, Cognitive Diagnosis (CD) is a fundamental task that aims to estimate the mastery degree of a student on each skill according to the exercise record. The CD task is considered rather challenging since we need to model inner-relations and inter-relations among students, skills, and questions to obtain more abundant information. Most existing methods attempt to solve this problem through two-way interactions between students and questions (or between students and skills), ignoring potential high-order relations among entities. Furthermore, how to construct an end-to-end framework that can model the complex interactions among different types of entities at the same time remains unexplored. Therefore, in this paper, we propose a graph-based Cognitive Diagnosis model (GCDM) that directly discovers the interactions among students, skills, and questions through a heterogeneous cognitive graph. Specifically, we design two graph-based layers: a performance-relative propagator and an attentive knowledge aggregator. The former is applied to propagate a student’s cognitive state through different types of graph edges, while the latter selectively gathers messages from neighboring graph nodes. Extensive experimental results on two real-world datasets clearly show the effectiveness and extendibility of our proposed model.} 385 385 abstract = {For intelligent tutoring systems, Cognitive Diagnosis (CD) is a fundamental task that aims to estimate the mastery degree of a student on each skill according to the exercise record. The CD task is considered rather challenging since we need to model inner-relations and inter-relations among students, skills, and questions to obtain more abundant information. Most existing methods attempt to solve this problem through two-way interactions between students and questions (or between students and skills), ignoring potential high-order relations among entities. 
Furthermore, how to construct an end-to-end framework that can model the complex interactions among different types of entities at the same time remains unexplored. Therefore, in this paper, we propose a graph-based Cognitive Diagnosis model (GCDM) that directly discovers the interactions among students, skills, and questions through a heterogeneous cognitive graph. Specifically, we design two graph-based layers: a performance-relative propagator and an attentive knowledge aggregator. The former is applied to propagate a student’s cognitive state through different types of graph edges, while the latter selectively gathers messages from neighboring graph nodes. Extensive experimental results on two real-world datasets clearly show the effectiveness and extendibility of our proposed model.}
} 386 386 }
387 387
@article{EZALDEEN2022100700, 388 388 @article{EZALDEEN2022100700,
title = {A hybrid E-learning recommendation integrating adaptive profiling and sentiment analysis}, 389 389 title = {A hybrid E-learning recommendation integrating adaptive profiling and sentiment analysis},
journal = {Journal of Web Semantics}, 390 390 journal = {Journal of Web Semantics},
volume = {72}, 391 391 volume = {72},
pages = {100700}, 392 392 pages = {100700},
year = {2022}, 393 393 year = {2022},
type = {article}, 394 394 type = {article},
language = {English}, 395 395 language = {English},
issn = {1570-8268}, 396 396 issn = {1570-8268},
doi = {10.1016/j.websem.2021.100700},
url = {https://www.sciencedirect.com/science/article/pii/S1570826821000664}, 398 398 url = {https://www.sciencedirect.com/science/article/pii/S1570826821000664},
author = {Hadi Ezaldeen and Rachita Misra and Sukant Kishoro Bisoy and Rawaa Alatrash and Rojalina Priyadarshini}, 399 399 author = {Hadi Ezaldeen and Rachita Misra and Sukant Kishoro Bisoy and Rawaa Alatrash and Rojalina Priyadarshini},
keywords = {Hybrid E-learning recommendation, Adaptive profiling, Semantic learner profile, Fine-grained sentiment analysis, Convolutional Neural Network, Word embeddings}, 400 400 keywords = {Hybrid E-learning recommendation, Adaptive profiling, Semantic learner profile, Fine-grained sentiment analysis, Convolutional Neural Network, Word embeddings},
abstract = {This research proposes a novel framework named Enhanced e-Learning Hybrid Recommender System (ELHRS) that provides an appropriate e-content with the highest predicted ratings corresponding to the learner’s particular needs. To accomplish this, a new model is developed to deduce the Semantic Learner Profile automatically. It adaptively associates the learning patterns and rules depending on the learner’s behavior and the semantic relations computed in the semantic matrix that mutually links e-learning materials and terms. Here, a semantic-based approach for term expansion is introduced using DBpedia and WordNet ontologies. Further, various sentiment analysis models are proposed and incorporated as a part of the recommender system to predict ratings of e-learning resources from posted text reviews utilizing fine-grained sentiment classification on five discrete classes. Qualitative Natural Language Processing (NLP) methods with tailored-made Convolutional Neural Network (CNN) are developed and evaluated on our customized dataset collected for a specific domain and a public dataset. Two improved language models are introduced depending on Skip-Gram (S-G) and Continuous Bag of Words (CBOW) techniques. In addition, a robust language model based on hybridization of these couple of methods is developed to derive better vocabulary representation, yielding better accuracy 89.1% for the CNN-Three-Channel-Concatenation model. The suggested recommendation methodology depends on the learner’s preferences, other similar learners’ experience and background, deriving their opinions from the reviews towards the best learning resources. This assists the learners in finding the desired e-content at the proper time.} 401 401 abstract = {This research proposes a novel framework named Enhanced e-Learning Hybrid Recommender System (ELHRS) that provides an appropriate e-content with the highest predicted ratings corresponding to the learner’s particular needs. 
To accomplish this, a new model is developed to deduce the Semantic Learner Profile automatically. It adaptively associates the learning patterns and rules depending on the learner’s behavior and the semantic relations computed in the semantic matrix that mutually links e-learning materials and terms. Here, a semantic-based approach for term expansion is introduced using DBpedia and WordNet ontologies. Further, various sentiment analysis models are proposed and incorporated as a part of the recommender system to predict ratings of e-learning resources from posted text reviews utilizing fine-grained sentiment classification on five discrete classes. Qualitative Natural Language Processing (NLP) methods with tailored-made Convolutional Neural Network (CNN) are developed and evaluated on our customized dataset collected for a specific domain and a public dataset. Two improved language models are introduced depending on Skip-Gram (S-G) and Continuous Bag of Words (CBOW) techniques. In addition, a robust language model based on hybridization of these couple of methods is developed to derive better vocabulary representation, yielding better accuracy 89.1% for the CNN-Three-Channel-Concatenation model. The suggested recommendation methodology depends on the learner’s preferences, other similar learners’ experience and background, deriving their opinions from the reviews towards the best learning resources. This assists the learners in finding the desired e-content at the proper time.}
} 402 402 }
403 403
@article{MUANGPRATHUB2020e05227, 404 404 @article{MUANGPRATHUB2020e05227,
title = {Learning recommendation with formal concept analysis for intelligent tutoring system}, 405 405 title = {Learning recommendation with formal concept analysis for intelligent tutoring system},
journal = {Heliyon}, 406 406 journal = {Heliyon},
volume = {6}, 407 407 volume = {6},
number = {10}, 408 408 number = {10},
pages = {e05227}, 409 409 pages = {e05227},
language = {English}, 410 410 language = {English},
type = {article}, 411 411 type = {article},
year = {2020}, 412 412 year = {2020},
issn = {2405-8440}, 413 413 issn = {2405-8440},
doi = {10.1016/j.heliyon.2020.e05227},
url = {https://www.sciencedirect.com/science/article/pii/S2405844020320703}, 415 415 url = {https://www.sciencedirect.com/science/article/pii/S2405844020320703},
author = {Jirapond Muangprathub and Veera Boonjing and Kosin Chamnongthai}, 416 416 author = {Jirapond Muangprathub and Veera Boonjing and Kosin Chamnongthai},
keywords = {Computer Science, Learning recommendation, Formal concept analysis, Intelligent tutoring system, Adaptive learning}, 417 417 keywords = {Computer Science, Learning recommendation, Formal concept analysis, Intelligent tutoring system, Adaptive learning},
abstract = {The aim of this research was to develop a learning recommendation component in an intelligent tutoring system (ITS) that dynamically predicts and adapts to a learner's style. In order to develop a proper ITS, we present an improved knowledge base supporting adaptive learning, which can be achieved by a suitable knowledge construction. This process is illustrated by implementing a web-based online tutor system. In addition, our knowledge structure provides adaptive presentation and personalized learning with the proposed adaptive algorithm, to retrieve content according to individual learner characteristics. To demonstrate the proposed adaptive algorithm, pre-test and post-test were used to evaluate suggestion accuracy of the course in a class for adapting to a learner's style. In addition, pre- and post-testing were also used with students in a real teaching/learning environment to evaluate the performance of the proposed model. The results show that the proposed system can be used to help students or learners achieve improved learning.} 418 418 abstract = {The aim of this research was to develop a learning recommendation component in an intelligent tutoring system (ITS) that dynamically predicts and adapts to a learner's style. In order to develop a proper ITS, we present an improved knowledge base supporting adaptive learning, which can be achieved by a suitable knowledge construction. This process is illustrated by implementing a web-based online tutor system. In addition, our knowledge structure provides adaptive presentation and personalized learning with the proposed adaptive algorithm, to retrieve content according to individual learner characteristics. To demonstrate the proposed adaptive algorithm, pre-test and post-test were used to evaluate suggestion accuracy of the course in a class for adapting to a learner's style. 
In addition, pre- and post-testing were also used with students in a real teaching/learning environment to evaluate the performance of the proposed model. The results show that the proposed system can be used to help students or learners achieve improved learning.}
} 419 419 }
420 420
@article{min8100434, 421 421 @article{min8100434,
author = {Leikola, Maria and Sauer, Christian and Rintala, Lotta and Aromaa, Jari and Lundström, Mari}, 422 422 author = {Leikola, Maria and Sauer, Christian and Rintala, Lotta and Aromaa, Jari and Lundström, Mari},
title = {Assessing the Similarity of Cyanide-Free Gold Leaching Processes: A Case-Based Reasoning Application}, 423 423 title = {Assessing the Similarity of Cyanide-Free Gold Leaching Processes: A Case-Based Reasoning Application},
journal = {Minerals}, 424 424 journal = {Minerals},
volume = {8}, 425 425 volume = {8},
type = {article}, 426 426 type = {article},
language = {English}, 427 427 language = {English},
year = {2018}, 428 428 year = {2018},
number = {10}, 429 429 number = {10},
url = {https://www.mdpi.com/2075-163X/8/10/434}, 430 430 url = {https://www.mdpi.com/2075-163X/8/10/434},
issn = {2075-163X}, 431 431 issn = {2075-163X},
keywords={hydrometallurgy, cyanide-free gold, knowledge modelling, case-based reasoning, information retrieval}, 432 432 keywords={hydrometallurgy, cyanide-free gold, knowledge modelling, case-based reasoning, information retrieval},
abstract = {Hydrometallurgical researchers, and other professionals alike, invest significant amounts of time reading scientific articles, technical notes, and other scientific documents, while looking for the most relevant information for their particular research interest. In an attempt to save the researcher’s time, this study presents an information retrieval tool using case-based reasoning. The tool was built for comparing scientific articles concerning cyanide-free leaching of gold ores/concentrates/tailings. Altogether, 50 cases of experiments were gathered in a case base. 15 different attributes related to the treatment of the raw material and the leaching conditions were selected to compare the cases. The attributes were as follows: Pretreatment, Overall method, Complexant source, Oxidant source, Complexant concentration, Oxidant concentration, Temperature, pH, Redox-potential, Pressure, Materials of construction, Extraction, Extraction rate, Reagent consumption, and Solid-liquid ratio. The resulting retrieval tool (LeachSim) was able to rank the scientific articles according to their similarity with the user’s research interest. Such a tool could eventually aid the user in finding the most relevant information, but not replace thorough understanding and human expertise.}, 433 433 abstract = {Hydrometallurgical researchers, and other professionals alike, invest significant amounts of time reading scientific articles, technical notes, and other scientific documents, while looking for the most relevant information for their particular research interest. In an attempt to save the researcher’s time, this study presents an information retrieval tool using case-based reasoning. The tool was built for comparing scientific articles concerning cyanide-free leaching of gold ores/concentrates/tailings. Altogether, 50 cases of experiments were gathered in a case base. 
15 different attributes related to the treatment of the raw material and the leaching conditions were selected to compare the cases. The attributes were as follows: Pretreatment, Overall method, Complexant source, Oxidant source, Complexant concentration, Oxidant concentration, Temperature, pH, Redox-potential, Pressure, Materials of construction, Extraction, Extraction rate, Reagent consumption, and Solid-liquid ratio. The resulting retrieval tool (LeachSim) was able to rank the scientific articles according to their similarity with the user’s research interest. Such a tool could eventually aid the user in finding the most relevant information, but not replace thorough understanding and human expertise.},
doi = {10.3390/min8100434} 434 434 doi = {10.3390/min8100434}
} 435 435 }
436 436
@article{10.1145/3459665, 437 437 @article{10.1145/3459665,
author = {Cunningham, P\'{a}draig and Delany, Sarah Jane}, 438 438 author = {Cunningham, P\'{a}draig and Delany, Sarah Jane},
title = {K-Nearest Neighbour Classifiers - A Tutorial}, 439 439 title = {K-Nearest Neighbour Classifiers - A Tutorial},
year = {2021}, 440 440 year = {2021},
issue_date = {July 2022}, 441 441 issue_date = {July 2022},
publisher = {Association for Computing Machinery}, 442 442 publisher = {Association for Computing Machinery},
address = {New York, NY, USA}, 443 443 address = {New York, NY, USA},
type={article}, 444 444 type={article},
language={English}, 445 445 language={English},
volume = {54}, 446 446 volume = {54},
number = {6}, 447 447 number = {6},
issn = {0360-0300}, 448 448 issn = {0360-0300},
url = {https://doi.org/10.1145/3459665}, 449 449 url = {https://doi.org/10.1145/3459665},
doi = {10.1145/3459665}, 450 450 doi = {10.1145/3459665},
abstract = {Perhaps the most straightforward classifier in the arsenal or Machine Learning techniques is the Nearest Neighbour Classifier—classification is achieved by identifying the nearest neighbours to a query example and using those neighbours to determine the class of the query. This approach to classification is of particular importance, because issues of poor runtime performance is not such a problem these days with the computational power that is available. This article presents an overview of techniques for Nearest Neighbour classification focusing on: mechanisms for assessing similarity (distance), computational issues in identifying nearest neighbours, and mechanisms for reducing the dimension of the data.This article is the second edition of a paper previously published as a technical report [16]. Sections on similarity measures for time-series, retrieval speedup, and intrinsic dimensionality have been added. An Appendix is included, providing access to Python code for the key methods.}, 451 451 abstract = {Perhaps the most straightforward classifier in the arsenal or Machine Learning techniques is the Nearest Neighbour Classifier—classification is achieved by identifying the nearest neighbours to a query example and using those neighbours to determine the class of the query. This approach to classification is of particular importance, because issues of poor runtime performance is not such a problem these days with the computational power that is available. This article presents an overview of techniques for Nearest Neighbour classification focusing on: mechanisms for assessing similarity (distance), computational issues in identifying nearest neighbours, and mechanisms for reducing the dimension of the data.This article is the second edition of a paper previously published as a technical report [16]. Sections on similarity measures for time-series, retrieval speedup, and intrinsic dimensionality have been added. 
An Appendix is included, providing access to Python code for the key methods.},
journal = {ACM Comput. Surv.}, 452 452 journal = {ACM Comput. Surv.},
month = {jul}, 453 453 month = {jul},
articleno = {128}, 454 454 articleno = {128},
numpages = {25}, 455 455 numpages = {25},
keywords = {k-Nearest neighbour classifiers} 456 456 keywords = {k-Nearest neighbour classifiers}
} 457 457 }
458 458
@article{9072123, 459 459 @article{9072123,
author={Sinaga, Kristina P. and Yang, Miin-Shen}, 460 460 author={Sinaga, Kristina P. and Yang, Miin-Shen},
journal={IEEE Access}, 461 461 journal={IEEE Access},
type={article}, 462 462 type={article},
language={English}, 463 463 language={English},
title={Unsupervised K-Means Clustering Algorithm}, 464 464 title={Unsupervised K-Means Clustering Algorithm},
year={2020}, 465 465 year={2020},
volume={8}, 466 466 volume={8},
number={}, 467 467 number={},
pages={80716--80727},
doi={10.1109/ACCESS.2020.2988796} 469 469 doi={10.1109/ACCESS.2020.2988796}
} 470 470 }
471 471
@article{WANG2021331, 472 472 @article{WANG2021331,
title = {A new prediction strategy for dynamic multi-objective optimization using Gaussian Mixture Model}, 473 473 title = {A new prediction strategy for dynamic multi-objective optimization using Gaussian Mixture Model},
journal = {Information Sciences}, 474 474 journal = {Information Sciences},
volume = {580}, 475 475 volume = {580},
type = {article}, 476 476 type = {article},
language = {English}, 477 477 language = {English},
pages = {331--351},
year = {2021}, 479 479 year = {2021},
issn = {0020-0255}, 480 480 issn = {0020-0255},
doi = {10.1016/j.ins.2021.08.065},
url = {https://www.sciencedirect.com/science/article/pii/S0020025521008732}, 482 482 url = {https://www.sciencedirect.com/science/article/pii/S0020025521008732},
author = {Feng Wang and Fanshu Liao and Yixuan Li and Hui Wang}, 483 483 author = {Feng Wang and Fanshu Liao and Yixuan Li and Hui Wang},
keywords = {Dynamic multi-objective optimization, Gaussian Mixture Model, Change type detection, Resampling}, 484 484 keywords = {Dynamic multi-objective optimization, Gaussian Mixture Model, Change type detection, Resampling},
abstract = {Dynamic multi-objective optimization problems (DMOPs), in which the environments change over time, have attracted many researchers’ attention in recent years. Since the Pareto set (PS) or the Pareto front (PF) can change over time, how to track the movement of the PS or PF is a challenging problem in DMOPs. Over the past few years, lots of methods have been proposed, and the prediction based strategy has been considered the most effective way to track the new PS. However, the performance of most existing prediction strategies depends greatly on the quantity and quality of the historical information and will deteriorate due to non-linear changes, leading to poor results. In this paper, we propose a new prediction method, named MOEA/D-GMM, which incorporates the Gaussian Mixture Model (GMM) into the MOEA/D framework for the prediction of the new PS when changes occur. Since GMM is a powerful non-linear model to accurately fit various data distributions, it can effectively generate solutions with better quality according to the distributions. In the proposed algorithm, a change type detection strategy is first designed to estimate an approximate PS according to different change types. Then, GMM is employed to make a more accurate prediction by training it with the approximate PS. To overcome the shortcoming of a lack of training solutions for GMM, the Empirical Cumulative Distribution Function (ECDF) method is used to resample more training solutions before GMM training. Experimental results on various benchmark test problems and a classical real-world problem show that, compared with some state-of-the-art dynamic optimization algorithms, MOEA/D-GMM outperforms others in most cases.} 485 485 abstract = {Dynamic multi-objective optimization problems (DMOPs), in which the environments change over time, have attracted many researchers’ attention in recent years. 
Since the Pareto set (PS) or the Pareto front (PF) can change over time, how to track the movement of the PS or PF is a challenging problem in DMOPs. Over the past few years, lots of methods have been proposed, and the prediction based strategy has been considered the most effective way to track the new PS. However, the performance of most existing prediction strategies depends greatly on the quantity and quality of the historical information and will deteriorate due to non-linear changes, leading to poor results. In this paper, we propose a new prediction method, named MOEA/D-GMM, which incorporates the Gaussian Mixture Model (GMM) into the MOEA/D framework for the prediction of the new PS when changes occur. Since GMM is a powerful non-linear model to accurately fit various data distributions, it can effectively generate solutions with better quality according to the distributions. In the proposed algorithm, a change type detection strategy is first designed to estimate an approximate PS according to different change types. Then, GMM is employed to make a more accurate prediction by training it with the approximate PS. To overcome the shortcoming of a lack of training solutions for GMM, the Empirical Cumulative Distribution Function (ECDF) method is used to resample more training solutions before GMM training. Experimental results on various benchmark test problems and a classical real-world problem show that, compared with some state-of-the-art dynamic optimization algorithms, MOEA/D-GMM outperforms others in most cases.}
} 486 486 }
487 487
@article{9627973, 488 488 @article{9627973,
author={Xu, Shengbing and Cai, Wei and Xia, Hongxi and Liu, Bo and Xu, Jie}, 489 489 author={Xu, Shengbing and Cai, Wei and Xia, Hongxi and Liu, Bo and Xu, Jie},
journal={IEEE Access}, 490 490 journal={IEEE Access},
title={Dynamic Metric Accelerated Method for Fuzzy Clustering}, 491 491 title={Dynamic Metric Accelerated Method for Fuzzy Clustering},
year={2021}, 492 492 year={2021},
type={article}, 493 493 type={article},
language={English}, 494 494 language={English},
volume={9}, 495 495 volume={9},
number={}, 496 496 number={},
pages={166838--166854},
doi={10.1109/ACCESS.2021.3131368} 498 498 doi={10.1109/ACCESS.2021.3131368}
} 499 499 }
500 500
@article{9434422, 501 501 @article{9434422,
author={Gupta, Samarth and Chaudhari, Shreyas and Joshi, Gauri and Yağan, Osman}, 502 502 author={Gupta, Samarth and Chaudhari, Shreyas and Joshi, Gauri and Yağan, Osman},
journal={IEEE Transactions on Information Theory}, 503 503 journal={IEEE Transactions on Information Theory},
title={Multi-Armed Bandits With Correlated Arms}, 504 504 title={Multi-Armed Bandits With Correlated Arms},
year={2021}, 505 505 year={2021},
language={English}, 506 506 language={English},
type={article}, 507 507 type={article},
volume={67}, 508 508 volume={67},
number={10}, 509 509 number={10},
pages={6711--6732},
doi={10.1109/TIT.2021.3081508} 511 511 doi={10.1109/TIT.2021.3081508}
} 512 512 }
513 513
@Inproceedings{8495930, 514 514 @Inproceedings{8495930,
author={Supic, H.}, 515 515 author={Supic, H.},
booktitle={2018 IEEE 27th International Conference on Enabling Technologies: Infrastructure for Collaborative Enterprises (WETICE)}, 516 516 booktitle={2018 IEEE 27th International Conference on Enabling Technologies: Infrastructure for Collaborative Enterprises (WETICE)},
title={Case-Based Reasoning Model for Personalized Learning Path Recommendation in Example-Based Learning Activities}, 517 517 title={Case-Based Reasoning Model for Personalized Learning Path Recommendation in Example-Based Learning Activities},
year={2018}, 518 518 year={2018},
type={article}, 519 519 type={article},
language={English}, 520 520 language={English},
volume={}, 521 521 volume={},
number={}, 522 522 number={},
pages={175--178},
doi={10.1109/WETICE.2018.00040} 524 524 doi={10.1109/WETICE.2018.00040}
} 525 525 }
526 526
@Inproceedings{9870279, 527 527 @Inproceedings{9870279,
author={Lin, Baihan}, 528 528 author={Lin, Baihan},
booktitle={2022 IEEE Congress on Evolutionary Computation (CEC)}, 529 529 booktitle={2022 IEEE Congress on Evolutionary Computation (CEC)},
title={Evolutionary Multi-Armed Bandits with Genetic Thompson Sampling}, 530 530 title={Evolutionary Multi-Armed Bandits with Genetic Thompson Sampling},
year={2022}, 531 531 year={2022},
type={article}, 532 532 type={article},
language={English}, 533 533 language={English},
volume={}, 534 534 volume={},
number={}, 535 535 number={},
pages={1--8},
doi={10.1109/CEC55065.2022.9870279} 537 537 doi={10.1109/CEC55065.2022.9870279}
} 538 538 }
539 539
@article{Obeid, 540 540 @article{Obeid,
author={Obeid, C. and Lahoud, C. and Khoury, H. E. and Champin, P.}, 541 541 author={Obeid, C. and Lahoud, C. and Khoury, H. E. and Champin, P.},
title={A Novel Hybrid Recommender System Approach for Student Academic Advising Named COHRS, Supported by Case-based Reasoning and Ontology}, 542 542 title={A Novel Hybrid Recommender System Approach for Student Academic Advising Named COHRS, Supported by Case-based Reasoning and Ontology},
journal={Computer Science and Information Systems}, 543 543 journal={Computer Science and Information Systems},
type={article}, 544 544 type={article},
language={English}, 545 545 language={English},
volume={19}, 546 546 volume={19},
number={2}, 547 547 number={2},
pages={979--1005},
year={2022}, 549 549 year={2022},
doi={10.2298/CSIS220215011O}
} 551 551 }
552 552
@book{Nkambou, 553 553 @book{Nkambou,
author = {Nkambou, R. and Bourdeau, J. and Mizoguchi, R.}, 554 554 author = {Nkambou, R. and Bourdeau, J. and Mizoguchi, R.},
title = {Advances in Intelligent Tutoring Systems}, 555 555 title = {Advances in Intelligent Tutoring Systems},
year = {2010}, 556 556 year = {2010},
type = {book},
language = {English}, 558 558 language = {English},
publisher = {Springer Berlin, Heidelberg}, 559 559 publisher = {Springer Berlin, Heidelberg},
edition = {1} 560 560 edition = {1}
} 561 561 }
562 562
@book{hajduk2019cognitive, 563 563 @book{hajduk2019cognitive,
title={Cognitive Multi-agent Systems: Structures, Strategies and Applications to Mobile Robotics and Robosoccer}, 564 564 title={Cognitive Multi-agent Systems: Structures, Strategies and Applications to Mobile Robotics and Robosoccer},
author={Hajduk, M. and Sukop, M. and Haun, M.}, 565 565 author={Hajduk, M. and Sukop, M. and Haun, M.},
type={book}, 566 566 type={book},
language={English}, 567 567 language={English},
isbn={9783319936857}, 568 568 isbn={9783319936857},
series={Studies in Systems, Decision and Control}, 569 569 series={Studies in Systems, Decision and Control},
year={2019}, 570 570 year={2019},
publisher={Springer International Publishing} 571 571 publisher={Springer International Publishing}
} 572 572 }
573 573
@article{RICHTER20093, 574 574 @article{RICHTER20093,
title = {The search for knowledge, contexts, and Case-Based Reasoning}, 575 575 title = {The search for knowledge, contexts, and Case-Based Reasoning},
journal = {Engineering Applications of Artificial Intelligence}, 576 576 journal = {Engineering Applications of Artificial Intelligence},
language = {English}, 577 577 language = {English},
type = {article}, 578 578 type = {article},
volume = {22}, 579 579 volume = {22},
number = {1}, 580 580 number = {1},
pages = {3--9},
year = {2009}, 582 582 year = {2009},
issn = {0952-1976}, 583 583 issn = {0952-1976},
doi = {10.1016/j.engappai.2008.04.021},
url = {https://www.sciencedirect.com/science/article/pii/S095219760800078X}, 585 585 url = {https://www.sciencedirect.com/science/article/pii/S095219760800078X},
author = {Michael M. Richter}, 586 586 author = {Michael M. Richter},
keywords = {Case-Based Reasoning, Knowledge, Processes, Utility, Context}, 587 587 keywords = {Case-Based Reasoning, Knowledge, Processes, Utility, Context},
abstract = {A major goal of this paper is to compare Case-Based Reasoning with other methods searching for knowledge. We consider knowledge as a resource that can be traded. It has no value in itself; the value is measured by the usefulness of applying it in some process. Such a process has info-needs that have to be satisfied. The concept to measure this is the economical term utility. In general, utility depends on the user and its context, i.e., it is subjective. Here, we introduce levels of contexts from general to individual. We illustrate that Case-Based Reasoning on the lower, i.e., more personal levels CBR is quite useful, in particular in comparison with traditional informational retrieval methods.} 588 588 abstract = {A major goal of this paper is to compare Case-Based Reasoning with other methods searching for knowledge. We consider knowledge as a resource that can be traded. It has no value in itself; the value is measured by the usefulness of applying it in some process. Such a process has info-needs that have to be satisfied. The concept to measure this is the economical term utility. In general, utility depends on the user and its context, i.e., it is subjective. Here, we introduce levels of contexts from general to individual. We illustrate that Case-Based Reasoning on the lower, i.e., more personal levels CBR is quite useful, in particular in comparison with traditional informational retrieval methods.}
} 589 589 }
590 590
@Thesis{Marie, 591 591 @Thesis{Marie,
author={Marie, F.}, 592 592 author={Marie, F.},
title={COLISEUM-3D. Une plate-forme innovante pour la segmentation d’images médicales par Raisonnement à Partir de Cas (RàPC) et méthodes d’apprentissage de type Deep Learning}, 593 593 title={COLISEUM-3D. Une plate-forme innovante pour la segmentation d’images médicales par Raisonnement à Partir de Cas (RàPC) et méthodes d’apprentissage de type Deep Learning},
type={diplomathesis}, 594 594 type={diplomathesis},
language={French}, 595 595 language={French},
institution={Université de Franche-Comté},
year={2019} 597 597 year={2019}
} 598 598 }
599 599
@book{Hoang, 600 600 @book{Hoang,
title = {La formule du savoir. Une philosophie unifiée du savoir fondée sur le théorème de Bayes}, 601 601 title = {La formule du savoir. Une philosophie unifiée du savoir fondée sur le théorème de Bayes},
author = {Hoang, L.N.}, 602 602 author = {Hoang, L.N.},
type = {book}, 603 603 type = {book},
language = {French}, 604 604 language = {French},
isbn = {9782759822607}, 605 605 isbn = {9782759822607},
year = {2018}, 606 606 year = {2018},
publisher = {EDP Sciences} 607 607 publisher = {EDP Sciences}
} 608 608 }
609 609
@book{Richter2013, 610 610 @book{Richter2013,
title={Case-Based Reasoning (A Textbook)}, 611 611 title={Case-Based Reasoning (A Textbook)},
author={Richter, M. and Weber, R.}, 612 612 author={Richter, M. and Weber, R.},
type={book}, 613 613 type={book},
language={English}, 614 614 language={English},
isbn={9783642401664}, 615 615 isbn={9783642401664},
year={2013}, 616 616 year={2013},
publisher={Springer-Verlag GmbH} 617 617 publisher={Springer-Verlag GmbH}
} 618 618 }
619 619
@book{kedia2020hands, 620 620 @book{kedia2020hands,
title={Hands-On Python Natural Language Processing: Explore tools and techniques to analyze and process text with a view to building real-world NLP applications}, 621 621 title={Hands-On Python Natural Language Processing: Explore tools and techniques to analyze and process text with a view to building real-world NLP applications},
author={Kedia, A. and Rasu, M.}, 622 622 author={Kedia, A. and Rasu, M.},
language={English}, 623 623 language={English},
type={book}, 624 624 type={book},
isbn={9781838982584}, 625 625 isbn={9781838982584},
url={https://books.google.fr/books?id=1AbuDwAAQBAJ}, 626 626 url={https://books.google.fr/books?id=1AbuDwAAQBAJ},
year={2020}, 627 627 year={2020},
publisher={Packt Publishing} 628 628 publisher={Packt Publishing}
} 629 629 }
630 630
@book{ghosh2019natural, 631 631 @book{ghosh2019natural,
title={Natural Language Processing Fundamentals: Build intelligent applications that can interpret the human language to deliver impactful results}, 632 632 title={Natural Language Processing Fundamentals: Build intelligent applications that can interpret the human language to deliver impactful results},
author={Ghosh, S. and Gunning, D.}, 633 633 author={Ghosh, S. and Gunning, D.},
language={English}, 634 634 language={English},
type={book}, 635 635 type={book},
isbn={9781789955989}, 636 636 isbn={9781789955989},
url={https://books.google.fr/books?id=i8-PDwAAQBAJ}, 637 637 url={https://books.google.fr/books?id=i8-PDwAAQBAJ},
year={2019}, 638 638 year={2019},
publisher={Packt Publishing} 639 639 publisher={Packt Publishing}
} 640 640 }
641 641
@article{Akerblom, 642 642 @article{Akerblom,
title={Online learning of network bottlenecks via minimax paths}, 643 643 title={Online learning of network bottlenecks via minimax paths},
author={Åkerblom, Niklas and Hoseini, Fazeleh Sadat and Haghir Chehreghani, Morteza},
language={English}, 645 645 language={English},
type={article}, 646 646 type={article},
volume = {122}, 647 647 volume = {122},
year = {2023}, 648 648 year = {2023},
issn = {1573-0565}, 649 649 issn = {1573-0565},
doi = {10.1007/s10994-022-06270-0},
url = {https://doi.org/10.1007/s10994-022-06270-0}, 651 651 url = {https://doi.org/10.1007/s10994-022-06270-0},
abstract={In this paper, we study bottleneck identification in networks via extracting minimax paths. Many real-world networks have stochastic weights for which full knowledge is not available in advance. Therefore, we model this task as a combinatorial semi-bandit problem to which we apply a combinatorial version of Thompson Sampling and establish an upper bound on the corresponding Bayesian regret. Due to the computational intractability of the problem, we then devise an alternative problem formulation which approximates the original objective. Finally, we experimentally evaluate the performance of Thompson Sampling with the approximate formulation on real-world directed and undirected networks.} 652 652 abstract={In this paper, we study bottleneck identification in networks via extracting minimax paths. Many real-world networks have stochastic weights for which full knowledge is not available in advance. Therefore, we model this task as a combinatorial semi-bandit problem to which we apply a combinatorial version of Thompson Sampling and establish an upper bound on the corresponding Bayesian regret. Due to the computational intractability of the problem, we then devise an alternative problem formulation which approximates the original objective. Finally, we experimentally evaluate the performance of Thompson Sampling with the approximate formulation on real-world directed and undirected networks.}
} 653 653 }
654 654
@article{Simen, 655 655 @article{Simen,
title={Dynamic slate recommendation with gated recurrent units and Thompson sampling}, 656 656 title={Dynamic slate recommendation with gated recurrent units and Thompson sampling},
author={Eide, Simen and Leslie, David S. and Frigessi, Arnoldo}, 657 657 author={Eide, Simen and Leslie, David S. and Frigessi, Arnoldo},
language={English}, 658 658 language={English},
type={article}, 659 659 type={article},
volume = {36}, 660 660 volume = {36},
year = {2022}, 661 661 year = {2022},
issn = {1573-756X}, 662 662 issn = {1573-756X},
doi = {10.1007/s10618-022-00849-w},
url = {https://doi.org/10.1007/s10618-022-00849-w}, 664 664 url = {https://doi.org/10.1007/s10618-022-00849-w},
abstract={We consider the problem of recommending relevant content to users of an internet platform in the form of lists of items, called slates. We introduce a variational Bayesian Recurrent Neural Net recommender system that acts on time series of interactions between the internet platform and the user, and which scales to real world industrial situations. The recommender system is tested both online on real users, and on an offline dataset collected from a Norwegian web-based marketplace, FINN.no, that is made public for research. This is one of the first publicly available datasets which includes all the slates that are presented to users as well as which items (if any) in the slates were clicked on. Such a data set allows us to move beyond the common assumption that implicitly assumes that users are considering all possible items at each interaction. Instead we build our likelihood using the items that are actually in the slate, and evaluate the strengths and weaknesses of both approaches theoretically and in experiments. We also introduce a hierarchical prior for the item parameters based on group memberships. Both item parameters and user preferences are learned probabilistically. Furthermore, we combine our model with bandit strategies to ensure learning, and introduce ‘in-slate Thompson sampling’ which makes use of the slates to maximise explorative opportunities. We show experimentally that explorative recommender strategies perform on par or above their greedy counterparts. Even without making use of exploration to learn more effectively, click rates increase simply because of improved diversity in the recommended slates.} 665 665 abstract={We consider the problem of recommending relevant content to users of an internet platform in the form of lists of items, called slates. 
We introduce a variational Bayesian Recurrent Neural Net recommender system that acts on time series of interactions between the internet platform and the user, and which scales to real world industrial situations. The recommender system is tested both online on real users, and on an offline dataset collected from a Norwegian web-based marketplace, FINN.no, that is made public for research. This is one of the first publicly available datasets which includes all the slates that are presented to users as well as which items (if any) in the slates were clicked on. Such a data set allows us to move beyond the common assumption that implicitly assumes that users are considering all possible items at each interaction. Instead we build our likelihood using the items that are actually in the slate, and evaluate the strengths and weaknesses of both approaches theoretically and in experiments. We also introduce a hierarchical prior for the item parameters based on group memberships. Both item parameters and user preferences are learned probabilistically. Furthermore, we combine our model with bandit strategies to ensure learning, and introduce ‘in-slate Thompson sampling’ which makes use of the slates to maximise explorative opportunities. We show experimentally that explorative recommender strategies perform on par or above their greedy counterparts. Even without making use of exploration to learn more effectively, click rates increase simply because of improved diversity in the recommended slates.}
} 666 666 }
667 667
@Inproceedings{Arthurs, 668 668 @Inproceedings{Arthurs,
author={Arthurs, Noah and Stenhaug, Ben and Karayev, Sergey and Piech, Chris}, 669 669 author={Arthurs, Noah and Stenhaug, Ben and Karayev, Sergey and Piech, Chris},
booktitle={International Conference on Educational Data Mining (EDM)}, 670 670 booktitle={International Conference on Educational Data Mining (EDM)},
title={Grades Are Not Normal: Improving Exam Score Models Using the Logit-Normal Distribution}, 671 671 title={Grades Are Not Normal: Improving Exam Score Models Using the Logit-Normal Distribution},
year={2019}, 672 672 year={2019},
type={article}, 673 673 type={article},
language={English}, 674 674 language={English},
volume={}, 675 675 volume={},
number={}, 676 676 number={},
pages={6}, 677 677 pages={6},
url={https://eric.ed.gov/?id=ED599204} 678 678 url={https://eric.ed.gov/?id=ED599204}
} 679 679 }
680 680
@article{Bahramian, 681 681 @article{Bahramian,
title={A Cold Start Context-Aware Recommender System for Tour Planning Using Artificial Neural Network and Case Based Reasoning}, 682 682 title={A Cold Start Context-Aware Recommender System for Tour Planning Using Artificial Neural Network and Case Based Reasoning},
author={Bahramian, Zahra and Ali Abbaspour, Rahim and Claramunt, Christophe}, 683 683 author={Bahramian, Zahra and Ali Abbaspour, Rahim and Claramunt, Christophe},
language={English}, 684 684 language={English},
type={article}, 685 685 type={article},
year = {2017}, 686 686 year = {2017},
issn = {1574-017X}, 687 687 issn = {1574-017X},
doi = {10.1155/2017/9364903},
url = {https://doi.org/10.1155/2017/9364903}, 689 689 url = {https://doi.org/10.1155/2017/9364903},
abstract={Nowadays, large amounts of tourism information and services are available over the Web. This makes it difficult for the user to search for some specific information such as selecting a tour in a given city as an ordered set of points of interest. Moreover, the user rarely knows all his needs upfront and his preferences may change during a recommendation process. The user may also have a limited number of initial ratings and most often the recommender system is likely to face the well-known cold start problem. The objective of the research presented in this paper is to introduce a hybrid interactive context-aware tourism recommender system that takes into account user’s feedbacks and additional contextual information. It offers personalized tours to the user based on his preferences thanks to the combination of a case based reasoning framework and an artificial neural network. The proposed method has been tried in the city of Tehran in Iran. The results show that the proposed method outperforms current artificial neural network methods and combinations of case based reasoning with $k$-nearest neighbor methods in terms of user effort, accuracy, and user satisfaction.}
} 691 691 }
692 692
@Thesis{Daubias2011, 693 693 @Thesis{Daubias2011,
author={Stéphanie Jean-Daubias},
title={Ingénierie des profils d'apprenants}, 695 695 title={Ingénierie des profils d'apprenants},
type={diplomathesis}, 696 696 type={diplomathesis},
language={French}, 697 697 language={French},
institution={Université Claude Bernard Lyon 1}, 698 698 institution={Université Claude Bernard Lyon 1},
year={2011} 699 699 year={2011}
} 700 700 }
701 701
@article{Tapalova, 702 702 @article{Tapalova,
author = {Olga Tapalova and Nadezhda Zhiyenbayeva}, 703 703 author = {Olga Tapalova and Nadezhda Zhiyenbayeva},
title ={Artificial Intelligence in Education: AIEd for Personalised Learning Pathways}, 704 704 title ={Artificial Intelligence in Education: AIEd for Personalised Learning Pathways},
journal = {Electronic Journal of e-Learning}, 705 705 journal = {Electronic Journal of e-Learning},
volume = {}, 706 706 volume = {},
number = {}, 707 707 number = {},
pages = {15}, 708 708 pages = {15},
year = {2022}, 709 709 year = {2022},
URL = {https://eric.ed.gov/?q=Artificial+Intelligence+in+Education%3a+AIEd+for+Personalised+Learning+Pathways&id=EJ1373006}, 710 710 URL = {https://eric.ed.gov/?q=Artificial+Intelligence+in+Education%3a+AIEd+for+Personalised+Learning+Pathways&id=EJ1373006},
language={English}, 711 711 language={English},
type={article}, 712 712 type={article},
abstract = {Artificial intelligence is the driving force of change focusing on the needs and demands of the student. The research explores Artificial Intelligence in Education (AIEd) for building personalised learning systems for students. The research investigates and proposes a framework for AIEd: social networking sites and chatbots, expert systems for education, intelligent mentors and agents, machine learning, personalised educational systems and virtual educational environments. These technologies help educators to develop and introduce personalised approaches to master new knowledge and develop professional competencies. The research presents a case study of AIEd implementation in education. The scholars conducted the experiment in educational establishments using artificial intelligence in the curriculum. The scholars surveyed 184 second-year students of the Institute of Pedagogy and Psychology at the Abay Kazakh National Pedagogical University and the Kuban State Technological University to collect the data. The scholars considered the collective group discussions regarding the application of artificial intelligence in education to improve the effectiveness of learning. The research identified key advantages to creating personalised learning pathways such as access to training in 24/7 mode, training in virtual contexts, adaptation of educational content to personal needs of students, real-time and regular feedback, improvements in the educational process and mental stimulations. The proposed education paradigm reflects the increasing role of artificial intelligence in socio-economic life, the social and ethical concerns artificial intelligence may pose to humanity and its role in the digitalisation of education. 
The current article may be used as a theoretical framework for many educational institutions planning to exploit the capabilities of artificial intelligence in their adaptation to personalized learning.} 713 713 abstract = {Artificial intelligence is the driving force of change focusing on the needs and demands of the student. The research explores Artificial Intelligence in Education (AIEd) for building personalised learning systems for students. The research investigates and proposes a framework for AIEd: social networking sites and chatbots, expert systems for education, intelligent mentors and agents, machine learning, personalised educational systems and virtual educational environments. These technologies help educators to develop and introduce personalised approaches to master new knowledge and develop professional competencies. The research presents a case study of AIEd implementation in education. The scholars conducted the experiment in educational establishments using artificial intelligence in the curriculum. The scholars surveyed 184 second-year students of the Institute of Pedagogy and Psychology at the Abay Kazakh National Pedagogical University and the Kuban State Technological University to collect the data. The scholars considered the collective group discussions regarding the application of artificial intelligence in education to improve the effectiveness of learning. The research identified key advantages to creating personalised learning pathways such as access to training in 24/7 mode, training in virtual contexts, adaptation of educational content to personal needs of students, real-time and regular feedback, improvements in the educational process and mental stimulations. The proposed education paradigm reflects the increasing role of artificial intelligence in socio-economic life, the social and ethical concerns artificial intelligence may pose to humanity and its role in the digitalisation of education. 
The current article may be used as a theoretical framework for many educational institutions planning to exploit the capabilities of artificial intelligence in their adaptation to personalized learning.}
} 714 714 }
715 715
@article{Auer, 716 716 @article{Auer,
title = {From monolithic systems to Microservices: An assessment framework}, 717 717 title = {From monolithic systems to Microservices: An assessment framework},
journal = {Information and Software Technology}, 718 718 journal = {Information and Software Technology},
volume = {137}, 719 719 volume = {137},
pages = {106600}, 720 720 pages = {106600},
year = {2021}, 721 721 year = {2021},
issn = {0950-5849}, 722 722 issn = {0950-5849},
doi = {10.1016/j.infsof.2021.106600},
url = {https://www.sciencedirect.com/science/article/pii/S0950584921000793}, 724 724 url = {https://www.sciencedirect.com/science/article/pii/S0950584921000793},
author = {Florian Auer and Valentina Lenarduzzi and Michael Felderer and Davide Taibi}, 725 725 author = {Florian Auer and Valentina Lenarduzzi and Michael Felderer and Davide Taibi},
keywords = {Microservices, Cloud migration, Software measurement}, 726 726 keywords = {Microservices, Cloud migration, Software measurement},
abstract = {Context: 727 727 abstract = {Context:
Re-architecting monolithic systems with Microservices-based architecture is a common trend. Various companies are migrating to Microservices for different reasons. However, making such an important decision like re-architecting an entire system must be based on real facts and not only on gut feelings. 728 728 Re-architecting monolithic systems with Microservices-based architecture is a common trend. Various companies are migrating to Microservices for different reasons. However, making such an important decision like re-architecting an entire system must be based on real facts and not only on gut feelings.
Objective: 729 729 Objective:
The goal of this work is to propose an evidence-based decision support framework for companies that need to migrate to Microservices, based on the analysis of a set of characteristics and metrics they should collect before re-architecting their monolithic system. 730 730 The goal of this work is to propose an evidence-based decision support framework for companies that need to migrate to Microservices, based on the analysis of a set of characteristics and metrics they should collect before re-architecting their monolithic system.
Method: 731 731 Method:
We conducted a survey done in the form of interviews with professionals to derive the assessment framework based on Grounded Theory. 732 732 We conducted a survey done in the form of interviews with professionals to derive the assessment framework based on Grounded Theory.
Results: 733 733 Results:
We identified a set consisting of information and metrics that companies can use to decide whether to migrate to Microservices or not. The proposed assessment framework, based on the aforementioned metrics, could be useful for companies if they need to migrate to Microservices and do not want to run the risk of failing to consider some important information.} 734 734 We identified a set consisting of information and metrics that companies can use to decide whether to migrate to Microservices or not. The proposed assessment framework, based on the aforementioned metrics, could be useful for companies if they need to migrate to Microservices and do not want to run the risk of failing to consider some important information.}
} 735 735 }
736 736
@Article{jmse10040464, 737 737 @Article{jmse10040464,
AUTHOR = {Zuluaga, Carlos A. and Aristizábal, Luis M. and Rúa, Santiago and Franco, Diego A. and Osorio, Dorie A. and Vásquez, Rafael E.}, 738 738 AUTHOR = {Zuluaga, Carlos A. and Aristizábal, Luis M. and Rúa, Santiago and Franco, Diego A. and Osorio, Dorie A. and Vásquez, Rafael E.},
TITLE = {Development of a Modular Software Architecture for Underwater Vehicles Using Systems Engineering}, 739 739 TITLE = {Development of a Modular Software Architecture for Underwater Vehicles Using Systems Engineering},
JOURNAL = {Journal of Marine Science and Engineering}, 740 740 JOURNAL = {Journal of Marine Science and Engineering},
VOLUME = {10}, 741 741 VOLUME = {10},
YEAR = {2022}, 742 742 YEAR = {2022},
NUMBER = {4}, 743 743 NUMBER = {4},
ARTICLE-NUMBER = {464}, 744 744 ARTICLE-NUMBER = {464},
URL = {https://www.mdpi.com/2077-1312/10/4/464}, 745 745 URL = {https://www.mdpi.com/2077-1312/10/4/464},
ISSN = {2077-1312}, 746 746 ISSN = {2077-1312},
ABSTRACT = {This paper addresses the development of a modular software architecture for the design/construction/operation of a remotely operated vehicle (ROV), based on systems engineering. First, systems engineering and the Vee model are presented with the objective of defining the interactions of the stakeholders with the software architecture development team and establishing the baselines that must be met in each development phase. In the development stage, the definition of the architecture and its connection with the hardware is presented, taking into account the use of the actor model, which represents the high-level software architecture used to solve concurrency problems. Subsequently, the structure of the classes is defined both at high and low levels in the instruments using the object-oriented programming paradigm. Finally, unit tests are developed for each component in the software architecture, quality assessment tests are implemented for system functions fulfillment, and a field sea trial for testing different modules of the vehicle is described. This approach is well suited for the development of complex systems such as marine vehicles and those systems which require scalability and modularity to add functionalities.}, 747 747 ABSTRACT = {This paper addresses the development of a modular software architecture for the design/construction/operation of a remotely operated vehicle (ROV), based on systems engineering. First, systems engineering and the Vee model are presented with the objective of defining the interactions of the stakeholders with the software architecture development team and establishing the baselines that must be met in each development phase. In the development stage, the definition of the architecture and its connection with the hardware is presented, taking into account the use of the actor model, which represents the high-level software architecture used to solve concurrency problems. 
Subsequently, the structure of the classes is defined both at high and low levels in the instruments using the object-oriented programming paradigm. Finally, unit tests are developed for each component in the software architecture, quality assessment tests are implemented for system functions fulfillment, and a field sea trial for testing different modules of the vehicle is described. This approach is well suited for the development of complex systems such as marine vehicles and those systems which require scalability and modularity to add functionalities.},
DOI = {10.3390/jmse10040464} 748 748 DOI = {10.3390/jmse10040464}
} 749 749 }
750 750
@article{doi:10.1177/1754337116651013, 751 751 @article{doi:10.1177/1754337116651013,
author = {Julien Henriet and Lang Christophe and Philippe Laurent}, 752 752 author = {Julien Henriet and Lang Christophe and Philippe Laurent},
title ={Artificial Intelligence-Virtual Trainer: An educative system based on artificial intelligence and designed to produce varied and consistent training lessons}, 753 753 title ={Artificial Intelligence-Virtual Trainer: An educative system based on artificial intelligence and designed to produce varied and consistent training lessons},
journal = {Proceedings of the Institution of Mechanical Engineers, Part P: Journal of Sports Engineering and Technology}, 754 754 journal = {Proceedings of the Institution of Mechanical Engineers, Part P: Journal of Sports Engineering and Technology},
volume = {231}, 755 755 volume = {231},
number = {2}, 756 756 number = {2},
pages = {110-124}, 757 757 pages = {110-124},
year = {2017}, 758 758 year = {2017},
doi = {10.1177/1754337116651013}, 759 759 doi = {10.1177/1754337116651013},
URL = {https://doi.org/10.1177/1754337116651013}, 760 760 URL = {https://doi.org/10.1177/1754337116651013},
eprint = {https://doi.org/10.1177/1754337116651013}, 761 761 eprint = {https://doi.org/10.1177/1754337116651013},
abstract = { AI-Virtual Trainer is an educative system using Artificial Intelligence to propose varied lessons to trainers. The agents of this multi-agent system apply case-based reasoning to build solutions by analogy. However, as required by the field, Artificial Intelligence-Virtual Trainer never proposes the same lesson twice, whereas the same objective may be set many times consecutively. The adaptation process of Artificial Intelligence-Virtual Trainer delivers an ordered set of exercises adapted to the objectives and sub-objectives chosen by trainers. This process has been enriched by including the notion of distance between exercises: the proposed tasks are not only appropriate but are hierarchically ordered. With this new version of the system, students are guided towards their objectives via an underlying theme. Finally, the agents responsible for the different parts of lessons collaborate with each other according to a dedicated protocol and decision-making policy since no exercise must appear more than once in the same lesson. The results prove that Artificial Intelligence-Virtual Trainer, however perfectible, meets the requirements of this field. } 762 762 abstract = { AI-Virtual Trainer is an educative system using Artificial Intelligence to propose varied lessons to trainers. The agents of this multi-agent system apply case-based reasoning to build solutions by analogy. However, as required by the field, Artificial Intelligence-Virtual Trainer never proposes the same lesson twice, whereas the same objective may be set many times consecutively. The adaptation process of Artificial Intelligence-Virtual Trainer delivers an ordered set of exercises adapted to the objectives and sub-objectives chosen by trainers. This process has been enriched by including the notion of distance between exercises: the proposed tasks are not only appropriate but are hierarchically ordered. 
With this new version of the system, students are guided towards their objectives via an underlying theme. Finally, the agents responsible for the different parts of lessons collaborate with each other according to a dedicated protocol and decision-making policy since no exercise must appear more than once in the same lesson. The results prove that Artificial Intelligence-Virtual Trainer, however perfectible, meets the requirements of this field. }
} 763 763 }
764 764
@InProceedings{10.1007/978-3-030-01081-2_9, 765 765 @InProceedings{10.1007/978-3-030-01081-2_9,
author="Henriet, Julien 766 766 author="Henriet, Julien
and Greffier, Fran{\c{c}}oise", 767 767 and Greffier, Fran{\c{c}}oise",
editor="Cox, Michael T. 768 768 editor="Cox, Michael T.
and Funk, Peter 769 769 and Funk, Peter
and Begum, Shahina", 770 770 and Begum, Shahina",
title="AI-VT: An Example of CBR that Generates a Variety of Solutions to the Same Problem", 771 771 title="AI-VT: An Example of CBR that Generates a Variety of Solutions to the Same Problem",
booktitle="Case-Based Reasoning Research and Development", 772 772 booktitle="Case-Based Reasoning Research and Development",
year="2018", 773 773 year="2018",
publisher="Springer International Publishing", 774 774 publisher="Springer International Publishing",
address="Cham", 775 775 address="Cham",
pages="124--139", 776 776 pages="124--139",
abstract="AI-Virtual Trainer (AI-VT) is an intelligent tutoring system based on case-based reasoning. AI-VT has been designed to generate personalised, varied, and consistent training sessions for learners. The AI-VT training sessions propose different exercises in regard to a capacity associated with sub-capacities. For example, in the field of training for algorithms, a capacity could be ``Use a control structure alternative'' and an associated sub-capacity could be ``Write a boolean condition''. AI-VT can elaborate a personalised list of exercises for each learner. One of the main requirements and challenges studied in this work is its ability to propose varied training sessions to the same learner for many weeks, which constitutes the challenge studied in our work. Indeed, if the same set of exercises is proposed time after time to learners, they will stop paying attention and lose motivation. Thus, even if the generation of training sessions is based on analogy and must integrate the repetition of some exercises, it also must introduce some diversity and AI-VT must deal with this diversity. In this paper, we have highlighted the fact that the retaining (or capitalisation) phase of CBR is of the utmost importance for diversity, and we have also highlighted that the equilibrium between repetition and variety depends on the abilities learned. This balance has an important impact on the retaining phase of AI-VT.", 777 777 abstract="AI-Virtual Trainer (AI-VT) is an intelligent tutoring system based on case-based reasoning. AI-VT has been designed to generate personalised, varied, and consistent training sessions for learners. The AI-VT training sessions propose different exercises in regard to a capacity associated with sub-capacities. For example, in the field of training for algorithms, a capacity could be ``Use a control structure alternative'' and an associated sub-capacity could be ``Write a boolean condition''. 
AI-VT can elaborate a personalised list of exercises for each learner. One of the main requirements and challenges studied in this work is its ability to propose varied training sessions to the same learner for many weeks, which constitutes the challenge studied in our work. Indeed, if the same set of exercises is proposed time after time to learners, they will stop paying attention and lose motivation. Thus, even if the generation of training sessions is based on analogy and must integrate the repetition of some exercises, it also must introduce some diversity and AI-VT must deal with this diversity. In this paper, we have highlighted the fact that the retaining (or capitalisation) phase of CBR is of the utmost importance for diversity, and we have also highlighted that the equilibrium between repetition and variety depends on the abilities learned. This balance has an important impact on the retaining phase of AI-VT.",
isbn="978-3-030-01081-2" 778 778 isbn="978-3-030-01081-2"
} 779 779 }
780 780
@article{BAKUROV2021100913, 781 781 @article{BAKUROV2021100913,
title = {Genetic programming for stacked generalization}, 782 782 title = {Genetic programming for stacked generalization},
journal = {Swarm and Evolutionary Computation}, 783 783 journal = {Swarm and Evolutionary Computation},
volume = {65}, 784 784 volume = {65},
pages = {100913}, 785 785 pages = {100913},
year = {2021}, 786 786 year = {2021},
issn = {2210-6502}, 787 787 issn = {2210-6502},
doi = {10.1016/j.swevo.2021.100913},
url = {https://www.sciencedirect.com/science/article/pii/S2210650221000742}, 789 789 url = {https://www.sciencedirect.com/science/article/pii/S2210650221000742},
author = {Illya Bakurov and Mauro Castelli and Olivier Gau and Francesco Fontanella and Leonardo Vanneschi}, 790 790 author = {Illya Bakurov and Mauro Castelli and Olivier Gau and Francesco Fontanella and Leonardo Vanneschi},
keywords = {Genetic Programming, Stacking, Ensemble Learning, Stacked Generalization}, 791 791 keywords = {Genetic Programming, Stacking, Ensemble Learning, Stacked Generalization},
abstract = {In machine learning, ensemble techniques are widely used to improve the performance of both classification and regression systems. They combine the models generated by different learning algorithms, typically trained on different data subsets or with different parameters, to obtain more accurate models. Ensemble strategies range from simple voting rules to more complex and effective stacked approaches. They are based on adopting a meta-learner, i.e. a further learning algorithm, and are trained on the predictions provided by the single algorithms making up the ensemble. The paper aims at exploiting some of the most recent genetic programming advances in the context of stacked generalization. In particular, we investigate how the evolutionary demes despeciation initialization technique, ϵ-lexicase selection, geometric-semantic operators, and semantic stopping criterion, can be effectively used to improve GP-based systems’ performance for stacked generalization (a.k.a. stacking). The experiments, performed on a broad set of synthetic and real-world regression problems, confirm the effectiveness of the proposed approach.} 792 792 abstract = {In machine learning, ensemble techniques are widely used to improve the performance of both classification and regression systems. They combine the models generated by different learning algorithms, typically trained on different data subsets or with different parameters, to obtain more accurate models. Ensemble strategies range from simple voting rules to more complex and effective stacked approaches. They are based on adopting a meta-learner, i.e. a further learning algorithm, and are trained on the predictions provided by the single algorithms making up the ensemble. The paper aims at exploiting some of the most recent genetic programming advances in the context of stacked generalization. 
In particular, we investigate how the evolutionary demes despeciation initialization technique, ϵ-lexicase selection, geometric-semantic operators, and semantic stopping criterion, can be effectively used to improve GP-based systems’ performance for stacked generalization (a.k.a. stacking). The experiments, performed on a broad set of synthetic and real-world regression problems, confirm the effectiveness of the proposed approach.}
} 793 793 }
794 794
% NOTE(review): duplicate of entry 10.3389/fgene.2021.600040 below (same paper, same DOI); consider keeping only one.
@article{Liang,
author={Liang, Mang and Chang, Tianpeng and An, Bingxing and Duan, Xinghai and Du, Lili and Wang, Xiaoqiao and Miao, Jian and Xu, Lingyang and Gao, Xue and Zhang, Lupei and Li, Junya and Gao, Huijiang},
Title={A Stacking Ensemble Learning Framework for Genomic Prediction}, 797 797 Title={A Stacking Ensemble Learning Framework for Genomic Prediction},
Journal={Frontiers in Genetics}, 798 798 Journal={Frontiers in Genetics},
year={2021}, 799 799 year={2021},
doi = {10.3389/fgene.2021.600040},
PMID={33747037}, 801 801 PMID={33747037},
PMCID={PMC7969712} 802 802 PMCID={PMC7969712}
} 803 803 }
804 804
@Article{cmc.2023.033417, 805 805 @Article{cmc.2023.033417,
AUTHOR = {Jeonghoon Choi and Dongjun Suh and Marc-Oliver Otto}, 806 806 AUTHOR = {Jeonghoon Choi and Dongjun Suh and Marc-Oliver Otto},
TITLE = {Boosted Stacking Ensemble Machine Learning Method for Wafer Map Pattern Classification}, 807 807 TITLE = {Boosted Stacking Ensemble Machine Learning Method for Wafer Map Pattern Classification},
JOURNAL = {Computers, Materials \& Continua}, 808 808 JOURNAL = {Computers, Materials \& Continua},
VOLUME = {74}, 809 809 VOLUME = {74},
YEAR = {2023}, 810 810 YEAR = {2023},
NUMBER = {2}, 811 811 NUMBER = {2},
PAGES = {2945--2966}, 812 812 PAGES = {2945--2966},
URL = {http://www.techscience.com/cmc/v74n2/50296}, 813 813 URL = {http://www.techscience.com/cmc/v74n2/50296},
ISSN = {1546-2226}, 814 814 ISSN = {1546-2226},
ABSTRACT = {Recently, machine learning-based technologies have been developed to automate the classification of wafer map defect patterns during semiconductor manufacturing. The existing approaches used in the wafer map pattern classification include directly learning the image through a convolution neural network and applying the ensemble method after extracting image features. This study aims to classify wafer map defects more effectively and derive robust algorithms even for datasets with insufficient defect patterns. First, the number of defects during the actual process may be limited. Therefore, insufficient data are generated using convolutional auto-encoder (CAE), and the expanded data are verified using the evaluation technique of structural similarity index measure (SSIM). After extracting handcrafted features, a boosted stacking ensemble model that integrates the four base-level classifiers with the extreme gradient boosting classifier as a meta-level classifier is designed and built for training the model based on the expanded data for final prediction. Since the proposed algorithm shows better performance than those of existing ensemble classifiers even for insufficient defect patterns, the results of this study will contribute to improving the product quality and yield of the actual semiconductor manufacturing process.}, 815 815 ABSTRACT = {Recently, machine learning-based technologies have been developed to automate the classification of wafer map defect patterns during semiconductor manufacturing. The existing approaches used in the wafer map pattern classification include directly learning the image through a convolution neural network and applying the ensemble method after extracting image features. This study aims to classify wafer map defects more effectively and derive robust algorithms even for datasets with insufficient defect patterns. First, the number of defects during the actual process may be limited. 
Therefore, insufficient data are generated using convolutional auto-encoder (CAE), and the expanded data are verified using the evaluation technique of structural similarity index measure (SSIM). After extracting handcrafted features, a boosted stacking ensemble model that integrates the four base-level classifiers with the extreme gradient boosting classifier as a meta-level classifier is designed and built for training the model based on the expanded data for final prediction. Since the proposed algorithm shows better performance than those of existing ensemble classifiers even for insufficient defect patterns, the results of this study will contribute to improving the product quality and yield of the actual semiconductor manufacturing process.},
DOI = {10.32604/cmc.2023.033417} 816 816 DOI = {10.32604/cmc.2023.033417}
} 817 817 }
818 818
@ARTICLE{10.3389/fgene.2021.600040, 819 819 @ARTICLE{10.3389/fgene.2021.600040,
AUTHOR={Liang, Mang and Chang, Tianpeng and An, Bingxing and Duan, Xinghai and Du, Lili and Wang, Xiaoqiao and Miao, Jian and Xu, Lingyang and Gao, Xue and Zhang, Lupei and Li, Junya and Gao, Huijiang}, 820 820 AUTHOR={Liang, Mang and Chang, Tianpeng and An, Bingxing and Duan, Xinghai and Du, Lili and Wang, Xiaoqiao and Miao, Jian and Xu, Lingyang and Gao, Xue and Zhang, Lupei and Li, Junya and Gao, Huijiang},
TITLE={A Stacking Ensemble Learning Framework for Genomic Prediction}, 821 821 TITLE={A Stacking Ensemble Learning Framework for Genomic Prediction},
JOURNAL={Frontiers in Genetics}, 822 822 JOURNAL={Frontiers in Genetics},
VOLUME={12}, 823 823 VOLUME={12},
YEAR={2021}, 824 824 YEAR={2021},
URL={https://www.frontiersin.org/articles/10.3389/fgene.2021.600040}, 825 825 URL={https://www.frontiersin.org/articles/10.3389/fgene.2021.600040},
DOI={10.3389/fgene.2021.600040}, 826 826 DOI={10.3389/fgene.2021.600040},
ISSN={1664-8021}, 827 827 ISSN={1664-8021},
ABSTRACT={Machine learning (ML) is perhaps the most useful tool for the interpretation of large genomic datasets. However, the performance of a single machine learning method in genomic selection (GS) is currently unsatisfactory. To improve the genomic predictions, we constructed a stacking ensemble learning framework (SELF), integrating three machine learning methods, to predict genomic estimated breeding values (GEBVs). The present study evaluated the prediction ability of SELF by analyzing three real datasets, with different genetic architecture; comparing the prediction accuracy of SELF, base learners, genomic best linear unbiased prediction (GBLUP) and BayesB. For each trait, SELF performed better than base learners, which included support vector regression (SVR), kernel ridge regression (KRR) and elastic net (ENET). The prediction accuracy of SELF was, on average, 7.70% higher than GBLUP in three datasets. Except for the milk fat percentage (MFP) traits, of the German Holstein dairy cattle dataset, SELF was more robust than BayesB in all remaining traits. Therefore, we believed that SEFL has the potential to be promoted to estimate GEBVs in other animals and plants.} 828 828 ABSTRACT={Machine learning (ML) is perhaps the most useful tool for the interpretation of large genomic datasets. However, the performance of a single machine learning method in genomic selection (GS) is currently unsatisfactory. To improve the genomic predictions, we constructed a stacking ensemble learning framework (SELF), integrating three machine learning methods, to predict genomic estimated breeding values (GEBVs). The present study evaluated the prediction ability of SELF by analyzing three real datasets, with different genetic architecture; comparing the prediction accuracy of SELF, base learners, genomic best linear unbiased prediction (GBLUP) and BayesB. 
For each trait, SELF performed better than base learners, which included support vector regression (SVR), kernel ridge regression (KRR) and elastic net (ENET). The prediction accuracy of SELF was, on average, 7.70% higher than GBLUP in three datasets. Except for the milk fat percentage (MFP) traits, of the German Holstein dairy cattle dataset, SELF was more robust than BayesB in all remaining traits. Therefore, we believed that SELF has the potential to be promoted to estimate GEBVs in other animals and plants.}
} 829 829 }
830 830
@article{DIDDEN2023338, 831 831 @article{DIDDEN2023338,
title = {Decentralized learning multi-agent system for online machine shop scheduling problem}, 832 832 title = {Decentralized learning multi-agent system for online machine shop scheduling problem},
journal = {Journal of Manufacturing Systems}, 833 833 journal = {Journal of Manufacturing Systems},
volume = {67}, 834 834 volume = {67},
pages = {338-360}, 835 835 pages = {338-360},
year = {2023}, 836 836 year = {2023},
issn = {0278-6125}, 837 837 issn = {0278-6125},
doi = {10.1016/j.jmsy.2023.02.004},
url = {https://www.sciencedirect.com/science/article/pii/S0278612523000286}, 839 839 url = {https://www.sciencedirect.com/science/article/pii/S0278612523000286},
author = {Jeroen B.H.C. Didden and Quang-Vinh Dang and Ivo J.B.F. Adan}, 840 840 author = {Jeroen B.H.C. Didden and Quang-Vinh Dang and Ivo J.B.F. Adan},
keywords = {Multi-agent system, Decentralized systems, Learning algorithm, Industry 4.0, Smart manufacturing}, 841 841 keywords = {Multi-agent system, Decentralized systems, Learning algorithm, Industry 4.0, Smart manufacturing},
abstract = {Customer profiles have rapidly changed over the past few years, with products being requested with more customization and with lower demand. In addition to the advances in technologies owing to Industry 4.0, manufacturers explore autonomous and smart factories. This paper proposes a decentralized multi-agent system (MAS), including intelligent agents that can respond to their environment autonomously through learning capabilities, to cope with an online machine shop scheduling problem. In the proposed system, agents participate in auctions to receive jobs to process, learn how to bid for jobs correctly, and decide when to start processing a job. The objective is to minimize the mean weighted tardiness of all jobs. In contrast to the existing literature, the proposed MAS is assessed on its learning capabilities, producing novel insights concerning what is relevant for learning, when re-learning is needed, and system response to dynamic events (such as rush jobs, increase in processing time, and machine unavailability). Computational experiments also reveal the outperformance of the proposed MAS to other multi-agent systems by at least 25% and common dispatching rules in mean weighted tardiness, as well as other performance measures.} 842 842 abstract = {Customer profiles have rapidly changed over the past few years, with products being requested with more customization and with lower demand. In addition to the advances in technologies owing to Industry 4.0, manufacturers explore autonomous and smart factories. This paper proposes a decentralized multi-agent system (MAS), including intelligent agents that can respond to their environment autonomously through learning capabilities, to cope with an online machine shop scheduling problem. In the proposed system, agents participate in auctions to receive jobs to process, learn how to bid for jobs correctly, and decide when to start processing a job. 
The objective is to minimize the mean weighted tardiness of all jobs. In contrast to the existing literature, the proposed MAS is assessed on its learning capabilities, producing novel insights concerning what is relevant for learning, when re-learning is needed, and system response to dynamic events (such as rush jobs, increase in processing time, and machine unavailability). Computational experiments also reveal the outperformance of the proposed MAS to other multi-agent systems by at least 25% and common dispatching rules in mean weighted tardiness, as well as other performance measures.}
} 843 843 }
844 844
@article{REZAEI20221, 845 845 @article{REZAEI20221,
title = {A Biased Inferential Naivety learning model for a network of agents}, 846 846 title = {A Biased Inferential Naivety learning model for a network of agents},
journal = {Cognitive Systems Research}, 847 847 journal = {Cognitive Systems Research},
volume = {76}, 848 848 volume = {76},
pages = {1-12}, 849 849 pages = {1-12},
year = {2022}, 850 850 year = {2022},
issn = {1389-0417}, 851 851 issn = {1389-0417},
doi = {10.1016/j.cogsys.2022.07.001},
url = {https://www.sciencedirect.com/science/article/pii/S1389041722000298}, 853 853 url = {https://www.sciencedirect.com/science/article/pii/S1389041722000298},
author = {Zeinab Rezaei and Saeed Setayeshi and Ebrahim Mahdipour}, 854 854 author = {Zeinab Rezaei and Saeed Setayeshi and Ebrahim Mahdipour},
keywords = {Bayesian decision making, Heuristic method, Inferential naivety assumption, Observational learning, Social learning}, 855 855 keywords = {Bayesian decision making, Heuristic method, Inferential naivety assumption, Observational learning, Social learning},
abstract = {We propose a Biased Inferential Naivety social learning model. In this model, a group of agents tries to determine the true state of the world and make the best possible decisions. The agents have limited computational abilities. They receive noisy private signals about the true state and observe the history of their neighbors' decisions. The proposed model is rooted in the Bayesian method but avoids the complexity of fully Bayesian inference. In our model, the role of knowledge obtained from social observations is separated from the knowledge obtained from private observations. Therefore, the Bayesian inferences on social observations are approximated using inferential naivety assumption, while purely Bayesian inferences are made on private observations. The reduction of herd behavior is another innovation of the proposed model. This advantage is achieved by reducing the effect of social observations on agents' beliefs over time. Therefore, all the agents learn the truth, and the correct consensus is achieved effectively. In this model, using two cognitive biases, there is heterogeneity in agents' behaviors. Therefore, the growth of beliefs and the learning speed can be improved in different situations. Several Monte Carlo simulations confirm the features of the proposed model. The conditions under which the proposed model leads to asymptotic learning are proved.} 856 856 abstract = {We propose a Biased Inferential Naivety social learning model. In this model, a group of agents tries to determine the true state of the world and make the best possible decisions. The agents have limited computational abilities. They receive noisy private signals about the true state and observe the history of their neighbors' decisions. The proposed model is rooted in the Bayesian method but avoids the complexity of fully Bayesian inference. 
In our model, the role of knowledge obtained from social observations is separated from the knowledge obtained from private observations. Therefore, the Bayesian inferences on social observations are approximated using inferential naivety assumption, while purely Bayesian inferences are made on private observations. The reduction of herd behavior is another innovation of the proposed model. This advantage is achieved by reducing the effect of social observations on agents' beliefs over time. Therefore, all the agents learn the truth, and the correct consensus is achieved effectively. In this model, using two cognitive biases, there is heterogeneity in agents' behaviors. Therefore, the growth of beliefs and the learning speed can be improved in different situations. Several Monte Carlo simulations confirm the features of the proposed model. The conditions under which the proposed model leads to asymptotic learning are proved.}
} 857 857 }
858 858
@article{KAMALI2023110242, 859 859 @article{KAMALI2023110242,
title = {An immune inspired multi-agent system for dynamic multi-objective optimization}, 860 860 title = {An immune inspired multi-agent system for dynamic multi-objective optimization},
journal = {Knowledge-Based Systems}, 861 861 journal = {Knowledge-Based Systems},
volume = {262}, 862 862 volume = {262},
pages = {110242}, 863 863 pages = {110242},
year = {2023}, 864 864 year = {2023},
issn = {0950-7051}, 865 865 issn = {0950-7051},
doi = {10.1016/j.knosys.2022.110242},
url = {https://www.sciencedirect.com/science/article/pii/S0950705122013387}, 867 867 url = {https://www.sciencedirect.com/science/article/pii/S0950705122013387},
author = {Seyed Ruhollah Kamali and Touraj Banirostam and Homayun Motameni and Mohammad Teshnehlab}, 868 868 author = {Seyed Ruhollah Kamali and Touraj Banirostam and Homayun Motameni and Mohammad Teshnehlab},
keywords = {Immune inspired multi-agent system, Dynamic multi-objective optimization, Severe and frequent changes}, 869 869 keywords = {Immune inspired multi-agent system, Dynamic multi-objective optimization, Severe and frequent changes},
abstract = {In this research, an immune inspired multi-agent system (IMAS) is proposed to solve optimization problems in dynamic and multi-objective environments. The proposed IMAS uses artificial immune system metaphors to shape the local behaviors of agents to detect environmental changes, generate Pareto optimal solutions, and react to the dynamics of the problem environment. Apart from that, agents enhance their adaptive capacity in dealing with environmental changes to find the global optimum, with a hierarchical structure without any central control. This study used a combination of diversity-, multi-population- and memory-based approaches to perform better in multi-objective environments with severe and frequent changes. The proposed IMAS is compared with six state-of-the-art algorithms on various benchmark problems. The results indicate its superiority in many of the experiments.} 870 870 abstract = {In this research, an immune inspired multi-agent system (IMAS) is proposed to solve optimization problems in dynamic and multi-objective environments. The proposed IMAS uses artificial immune system metaphors to shape the local behaviors of agents to detect environmental changes, generate Pareto optimal solutions, and react to the dynamics of the problem environment. Apart from that, agents enhance their adaptive capacity in dealing with environmental changes to find the global optimum, with a hierarchical structure without any central control. This study used a combination of diversity-, multi-population- and memory-based approaches to perform better in multi-objective environments with severe and frequent changes. The proposed IMAS is compared with six state-of-the-art algorithms on various benchmark problems. The results indicate its superiority in many of the experiments.}
} 871 871 }
872 872
@article{ZHANG2023110564, 873 873 @article{ZHANG2023110564,
title = {A novel human learning optimization algorithm with Bayesian inference learning}, 874 874 title = {A novel human learning optimization algorithm with Bayesian inference learning},
journal = {Knowledge-Based Systems}, 875 875 journal = {Knowledge-Based Systems},
volume = {271}, 876 876 volume = {271},
pages = {110564}, 877 877 pages = {110564},
year = {2023}, 878 878 year = {2023},
issn = {0950-7051}, 879 879 issn = {0950-7051},
doi = {10.1016/j.knosys.2023.110564},
url = {https://www.sciencedirect.com/science/article/pii/S0950705123003143}, 881 881 url = {https://www.sciencedirect.com/science/article/pii/S0950705123003143},
author = {Pinggai Zhang and Ling Wang and Zixiang Fei and Lisheng Wei and Minrui Fei and Muhammad Ilyas Menhas}, 882 882 author = {Pinggai Zhang and Ling Wang and Zixiang Fei and Lisheng Wei and Minrui Fei and Muhammad Ilyas Menhas},
keywords = {Human learning optimization, Meta-heuristic, Bayesian inference, Bayesian inference learning, Individual learning, Social learning}, 883 883 keywords = {Human learning optimization, Meta-heuristic, Bayesian inference, Bayesian inference learning, Individual learning, Social learning},
abstract = {Humans perform Bayesian inference in a wide variety of tasks, which can help people make selection decisions effectively and therefore enhances learning efficiency and accuracy. Inspired by this fact, this paper presents a novel human learning optimization algorithm with Bayesian inference learning (HLOBIL), in which a Bayesian inference learning operator (BILO) is developed to utilize the inference strategy for enhancing learning efficiency. The in-depth analysis shows that the proposed BILO can efficiently improve the exploitation ability of the algorithm as it can achieve the optimal values and retrieve the optimal information with the accumulated search information. Besides, the exploration ability of HLOBIL is also strengthened by the inborn characteristics of Bayesian inference. The experimental results demonstrate that the developed HLOBIL is superior to previous HLO variants and other state-of-art algorithms with its improved exploitation and exploration abilities.} 884 884 abstract = {Humans perform Bayesian inference in a wide variety of tasks, which can help people make selection decisions effectively and therefore enhances learning efficiency and accuracy. Inspired by this fact, this paper presents a novel human learning optimization algorithm with Bayesian inference learning (HLOBIL), in which a Bayesian inference learning operator (BILO) is developed to utilize the inference strategy for enhancing learning efficiency. The in-depth analysis shows that the proposed BILO can efficiently improve the exploitation ability of the algorithm as it can achieve the optimal values and retrieve the optimal information with the accumulated search information. Besides, the exploration ability of HLOBIL is also strengthened by the inborn characteristics of Bayesian inference. 
The experimental results demonstrate that the developed HLOBIL is superior to previous HLO variants and other state-of-art algorithms with its improved exploitation and exploration abilities.}
} 885 885 }
886 886
@article{HIPOLITO2023103510, 887 887 @article{HIPOLITO2023103510,
title = {Breaking boundaries: The Bayesian Brain Hypothesis for perception and prediction}, 888 888 title = {Breaking boundaries: The Bayesian Brain Hypothesis for perception and prediction},
journal = {Consciousness and Cognition}, 889 889 journal = {Consciousness and Cognition},
volume = {111}, 890 890 volume = {111},
pages = {103510}, 891 891 pages = {103510},
year = {2023}, 892 892 year = {2023},
issn = {1053-8100}, 893 893 issn = {1053-8100},
doi = {10.1016/j.concog.2023.103510},
url = {https://www.sciencedirect.com/science/article/pii/S1053810023000478}, 895 895 url = {https://www.sciencedirect.com/science/article/pii/S1053810023000478},
author = {Inês Hipólito and Michael Kirchhoff}, 896 896 author = {Inês Hipólito and Michael Kirchhoff},
keywords = {Bayesian Brain Hypothesis, Modularity of the Mind, Cognitive processes, Informational boundaries}, 897 897 keywords = {Bayesian Brain Hypothesis, Modularity of the Mind, Cognitive processes, Informational boundaries},
abstract = {This special issue aims to provide a comprehensive overview of the current state of the Bayesian Brain Hypothesis and its standing across neuroscience, cognitive science and the philosophy of cognitive science. By gathering cutting-edge research from leading experts, this issue seeks to showcase the latest advancements in our understanding of the Bayesian brain, as well as its potential implications for future research in perception, cognition, and motor control. A special focus to achieve this aim is adopted in this special issue, as it seeks to explore the relation between two seemingly incompatible frameworks for the understanding of cognitive structure and function: the Bayesian Brain Hypothesis and the Modularity Theory of the Mind. In assessing the compatibility between these theories, the contributors to this special issue open up new pathways of thinking and advance our understanding of cognitive processes.} 898 898 abstract = {This special issue aims to provide a comprehensive overview of the current state of the Bayesian Brain Hypothesis and its standing across neuroscience, cognitive science and the philosophy of cognitive science. By gathering cutting-edge research from leading experts, this issue seeks to showcase the latest advancements in our understanding of the Bayesian brain, as well as its potential implications for future research in perception, cognition, and motor control. A special focus to achieve this aim is adopted in this special issue, as it seeks to explore the relation between two seemingly incompatible frameworks for the understanding of cognitive structure and function: the Bayesian Brain Hypothesis and the Modularity Theory of the Mind. In assessing the compatibility between these theories, the contributors to this special issue open up new pathways of thinking and advance our understanding of cognitive processes.}
} 899 899 }
900 900
@article{LI2023424, 901 901 @article{LI2023424,
title = {Multi-agent evolution reinforcement learning method for machining parameters optimization based on bootstrap aggregating graph attention network simulated environment}, 902 902 title = {Multi-agent evolution reinforcement learning method for machining parameters optimization based on bootstrap aggregating graph attention network simulated environment},
journal = {Journal of Manufacturing Systems}, 903 903 journal = {Journal of Manufacturing Systems},
volume = {67}, 904 904 volume = {67},
pages = {424-438}, 905 905 pages = {424-438},
year = {2023}, 906 906 year = {2023},
issn = {0278-6125}, 907 907 issn = {0278-6125},
doi = {10.1016/j.jmsy.2023.02.015},
url = {https://www.sciencedirect.com/science/article/pii/S0278612523000390}, 909 909 url = {https://www.sciencedirect.com/science/article/pii/S0278612523000390},
author = {Weiye Li and Songping He and Xinyong Mao and Bin Li and Chaochao Qiu and Jinwen Yu and Fangyu Peng and Xin Tan}, 910 910 author = {Weiye Li and Songping He and Xinyong Mao and Bin Li and Chaochao Qiu and Jinwen Yu and Fangyu Peng and Xin Tan},
keywords = {Surface roughness, Cutting efficiency, Machining parameters optimization, Graph attention network, Multi-agent reinforcement learning, Evolutionary learning}, 911 911 keywords = {Surface roughness, Cutting efficiency, Machining parameters optimization, Graph attention network, Multi-agent reinforcement learning, Evolutionary learning},
abstract = {Improving machining quality and production efficiency is the focus of the manufacturing industry. How to obtain efficient machining parameters under multiple constraints such as machining quality is a severe challenge for manufacturing industry. In this paper, a multi-agent evolutionary reinforcement learning method (MAERL) is proposed to optimize the machining parameters for high quality and high efficiency machining by combining the graph neural network and reinforcement learning. Firstly, a bootstrap aggregating graph attention network (Bagging-GAT) based roughness estimation method for machined surface is proposed, which combines the structural knowledge between machining parameters and vibration features. Secondly, a mathematical model of machining parameters optimization problem is established, which is formalized into Markov decision process (MDP), and a multi-agent reinforcement learning method is proposed to solve the MDP problem, and evolutionary learning is introduced to improve the stability of multi-agent training. Finally, a series of experiments were carried out on the commutator production line, and the results show that the proposed Bagging-GAT-based method can improve the prediction effect by about 25% in the case of small samples, and the MAERL-based optimization method can better deal with the coupling problem of reward function in the optimization process. Compared with the classical optimization method, the optimization effect is improved by 13% and a lot of optimization time is saved.} 912 912 abstract = {Improving machining quality and production efficiency is the focus of the manufacturing industry. How to obtain efficient machining parameters under multiple constraints such as machining quality is a severe challenge for manufacturing industry. 
In this paper, a multi-agent evolutionary reinforcement learning method (MAERL) is proposed to optimize the machining parameters for high quality and high efficiency machining by combining the graph neural network and reinforcement learning. Firstly, a bootstrap aggregating graph attention network (Bagging-GAT) based roughness estimation method for machined surface is proposed, which combines the structural knowledge between machining parameters and vibration features. Secondly, a mathematical model of machining parameters optimization problem is established, which is formalized into Markov decision process (MDP), and a multi-agent reinforcement learning method is proposed to solve the MDP problem, and evolutionary learning is introduced to improve the stability of multi-agent training. Finally, a series of experiments were carried out on the commutator production line, and the results show that the proposed Bagging-GAT-based method can improve the prediction effect by about 25% in the case of small samples, and the MAERL-based optimization method can better deal with the coupling problem of reward function in the optimization process. Compared with the classical optimization method, the optimization effect is improved by 13% and a lot of optimization time is saved.}
} 913 913 }
914 914
@inproceedings{10.1145/3290605.3300912, 915 915 @inproceedings{10.1145/3290605.3300912,
author = {Kim, Yea-Seul and Walls, Logan A. and Krafft, Peter and Hullman, Jessica}, 916 916 author = {Kim, Yea-Seul and Walls, Logan A. and Krafft, Peter and Hullman, Jessica},
title = {A Bayesian Cognition Approach to Improve Data Visualization}, 917 917 title = {A Bayesian Cognition Approach to Improve Data Visualization},
year = {2019}, 918 918 year = {2019},
isbn = {9781450359702}, 919 919 isbn = {9781450359702},
publisher = {Association for Computing Machinery}, 920 920 publisher = {Association for Computing Machinery},
address = {New York, NY, USA}, 921 921 address = {New York, NY, USA},
url = {https://doi.org/10.1145/3290605.3300912}, 922 922 url = {https://doi.org/10.1145/3290605.3300912},
doi = {10.1145/3290605.3300912}, 923 923 doi = {10.1145/3290605.3300912},
abstract = {People naturally bring their prior beliefs to bear on how they interpret the new information, yet few formal models exist for accounting for the influence of users' prior beliefs in interactions with data presentations like visualizations. We demonstrate a Bayesian cognitive model for understanding how people interpret visualizations in light of prior beliefs and show how this model provides a guide for improving visualization evaluation. In a first study, we show how applying a Bayesian cognition model to a simple visualization scenario indicates that people's judgments are consistent with a hypothesis that they are doing approximate Bayesian inference. In a second study, we evaluate how sensitive our observations of Bayesian behavior are to different techniques for eliciting people subjective distributions, and to different datasets. We find that people don't behave consistently with Bayesian predictions for large sample size datasets, and this difference cannot be explained by elicitation technique. In a final study, we show how normative Bayesian inference can be used as an evaluation framework for visualizations, including of uncertainty.}, 924 924 abstract = {People naturally bring their prior beliefs to bear on how they interpret the new information, yet few formal models exist for accounting for the influence of users' prior beliefs in interactions with data presentations like visualizations. We demonstrate a Bayesian cognitive model for understanding how people interpret visualizations in light of prior beliefs and show how this model provides a guide for improving visualization evaluation. In a first study, we show how applying a Bayesian cognition model to a simple visualization scenario indicates that people's judgments are consistent with a hypothesis that they are doing approximate Bayesian inference. 
In a second study, we evaluate how sensitive our observations of Bayesian behavior are to different techniques for eliciting people subjective distributions, and to different datasets. We find that people don't behave consistently with Bayesian predictions for large sample size datasets, and this difference cannot be explained by elicitation technique. In a final study, we show how normative Bayesian inference can be used as an evaluation framework for visualizations, including of uncertainty.},
booktitle = {Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems}, 925 925 booktitle = {Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems},
pages = {1--14},
numpages = {14}, 927 927 numpages = {14},
keywords = {bayesian cognition, uncertainty elicitation, visualization}, 928 928 keywords = {bayesian cognition, uncertainty elicitation, visualization},
location = {Glasgow, Scotland, UK},
series = {CHI '19} 930 930 series = {CHI '19}
} 931 931 }
932 932
@article{DYER2024104827, 933 933 @article{DYER2024104827,
title = {Black-box Bayesian inference for agent-based models}, 934 934 title = {Black-box Bayesian inference for agent-based models},
journal = {Journal of Economic Dynamics and Control}, 935 935 journal = {Journal of Economic Dynamics and Control},
volume = {161}, 936 936 volume = {161},
pages = {104827}, 937 937 pages = {104827},
year = {2024}, 938 938 year = {2024},
issn = {0165-1889}, 939 939 issn = {0165-1889},
doi = {10.1016/j.jedc.2024.104827},
url = {https://www.sciencedirect.com/science/article/pii/S0165188924000198}, 941 941 url = {https://www.sciencedirect.com/science/article/pii/S0165188924000198},
author = {Joel Dyer and Patrick Cannon and J. Doyne Farmer and Sebastian M. Schmon}, 942 942 author = {Joel Dyer and Patrick Cannon and J. Doyne Farmer and Sebastian M. Schmon},
keywords = {Agent-based models, Bayesian inference, Neural networks, Parameter estimation, Simulation-based inference, Time series}, 943 943 keywords = {Agent-based models, Bayesian inference, Neural networks, Parameter estimation, Simulation-based inference, Time series},
abstract = {Simulation models, in particular agent-based models, are gaining popularity in economics and the social sciences. The considerable flexibility they offer, as well as their capacity to reproduce a variety of empirically observed behaviours of complex systems, give them broad appeal, and the increasing availability of cheap computing power has made their use feasible. Yet a widespread adoption in real-world modelling and decision-making scenarios has been hindered by the difficulty of performing parameter estimation for such models. In general, simulation models lack a tractable likelihood function, which precludes a straightforward application of standard statistical inference techniques. A number of recent works have sought to address this problem through the application of likelihood-free inference techniques, in which parameter estimates are determined by performing some form of comparison between the observed data and simulation output. However, these approaches are (a) founded on restrictive assumptions, and/or (b) typically require many hundreds of thousands of simulations. These qualities make them unsuitable for large-scale simulations in economics and the social sciences, and can cast doubt on the validity of these inference methods in such scenarios. In this paper, we investigate the efficacy of two classes of simulation-efficient black-box approximate Bayesian inference methods that have recently drawn significant attention within the probabilistic machine learning community: neural posterior estimation and neural density ratio estimation. We present a number of benchmarking experiments in which we demonstrate that neural network-based black-box methods provide state of the art parameter inference for economic simulation models, and crucially are compatible with generic multivariate or even non-Euclidean time-series data. 
In addition, we suggest appropriate assessment criteria for use in future benchmarking of approximate Bayesian inference procedures for simulation models in economics and the social sciences.} 944 944 abstract = {Simulation models, in particular agent-based models, are gaining popularity in economics and the social sciences. The considerable flexibility they offer, as well as their capacity to reproduce a variety of empirically observed behaviours of complex systems, give them broad appeal, and the increasing availability of cheap computing power has made their use feasible. Yet a widespread adoption in real-world modelling and decision-making scenarios has been hindered by the difficulty of performing parameter estimation for such models. In general, simulation models lack a tractable likelihood function, which precludes a straightforward application of standard statistical inference techniques. A number of recent works have sought to address this problem through the application of likelihood-free inference techniques, in which parameter estimates are determined by performing some form of comparison between the observed data and simulation output. However, these approaches are (a) founded on restrictive assumptions, and/or (b) typically require many hundreds of thousands of simulations. These qualities make them unsuitable for large-scale simulations in economics and the social sciences, and can cast doubt on the validity of these inference methods in such scenarios. In this paper, we investigate the efficacy of two classes of simulation-efficient black-box approximate Bayesian inference methods that have recently drawn significant attention within the probabilistic machine learning community: neural posterior estimation and neural density ratio estimation. 
We present a number of benchmarking experiments in which we demonstrate that neural network-based black-box methods provide state of the art parameter inference for economic simulation models, and crucially are compatible with generic multivariate or even non-Euclidean time-series data. In addition, we suggest appropriate assessment criteria for use in future benchmarking of approximate Bayesian inference procedures for simulation models in economics and the social sciences.}
} 945 945 }
946 946
@Article{Nikpour2021, 947 947 @Article{Nikpour2021,
author={Nikpour, Hoda 948 948 author={Nikpour, Hoda
and Aamodt, Agnar}, 949 949 and Aamodt, Agnar},
title={Inference and reasoning in a Bayesian knowledge-intensive CBR system}, 950 950 title={Inference and reasoning in a Bayesian knowledge-intensive CBR system},
journal={Progress in Artificial Intelligence}, 951 951 journal={Progress in Artificial Intelligence},
year={2021}, 952 952 year={2021},
month={Mar}, 953 953 month={Mar},
day={01}, 954 954 day={01},
volume={10}, 955 955 volume={10},
number={1}, 956 956 number={1},
pages={49--63},
abstract={This paper presents the inference and reasoning methods in a Bayesian supported knowledge-intensive case-based reasoning (CBR) system called BNCreek. The inference and reasoning process in this system is a combination of three methods. The semantic network inference methods and the CBR method are employed to handle the difficulties of inferencing and reasoning in uncertain domains. The Bayesian network inference methods are employed to make the process more accurate. An experiment from oil well drilling as a complex and uncertain application domain is conducted. The system is evaluated against expert estimations and compared with seven other corresponding systems. The normalized discounted cumulative gain (NDCG) as a rank-based metric, the weighted error (WE), and root-square error (RSE) as the statistical metrics are employed to evaluate different aspects of the system capabilities. The results show the efficiency of the developed inference and reasoning methods.}, 958 958 abstract={This paper presents the inference and reasoning methods in a Bayesian supported knowledge-intensive case-based reasoning (CBR) system called BNCreek. The inference and reasoning process in this system is a combination of three methods. The semantic network inference methods and the CBR method are employed to handle the difficulties of inferencing and reasoning in uncertain domains. The Bayesian network inference methods are employed to make the process more accurate. An experiment from oil well drilling as a complex and uncertain application domain is conducted. The system is evaluated against expert estimations and compared with seven other corresponding systems. The normalized discounted cumulative gain (NDCG) as a rank-based metric, the weighted error (WE), and root-square error (RSE) as the statistical metrics are employed to evaluate different aspects of the system capabilities. The results show the efficiency of the developed inference and reasoning methods.},
issn={2192-6360}, 959 959 issn={2192-6360},
doi={10.1007/s13748-020-00223-1}, 960 960 doi={10.1007/s13748-020-00223-1},
url={https://doi.org/10.1007/s13748-020-00223-1} 961 961 url={https://doi.org/10.1007/s13748-020-00223-1}
} 962 962 }
963 963
@article{PRESCOTT2024112577, 964 964 @article{PRESCOTT2024112577,
title = {Efficient multifidelity likelihood-free Bayesian inference with adaptive computational resource allocation}, 965 965 title = {Efficient multifidelity likelihood-free Bayesian inference with adaptive computational resource allocation},
journal = {Journal of Computational Physics}, 966 966 journal = {Journal of Computational Physics},
volume = {496}, 967 967 volume = {496},
pages = {112577}, 968 968 pages = {112577},
year = {2024}, 969 969 year = {2024},
issn = {0021-9991}, 970 970 issn = {0021-9991},
doi = {10.1016/j.jcp.2023.112577},
url = {https://www.sciencedirect.com/science/article/pii/S0021999123006721}, 972 972 url = {https://www.sciencedirect.com/science/article/pii/S0021999123006721},
author = {Thomas P. Prescott and David J. Warne and Ruth E. Baker}, 973 973 author = {Thomas P. Prescott and David J. Warne and Ruth E. Baker},
keywords = {Likelihood-free Bayesian inference, Multifidelity approaches}, 974 974 keywords = {Likelihood-free Bayesian inference, Multifidelity approaches},
abstract = {Likelihood-free Bayesian inference algorithms are popular methods for inferring the parameters of complex stochastic models with intractable likelihoods. These algorithms characteristically rely heavily on repeated model simulations. However, whenever the computational cost of simulation is even moderately expensive, the significant burden incurred by likelihood-free algorithms leaves them infeasible for many practical applications. The multifidelity approach has been introduced in the context of approximate Bayesian computation to reduce the simulation burden of likelihood-free inference without loss of accuracy, by using the information provided by simulating computationally cheap, approximate models in place of the model of interest. In this work we demonstrate that multifidelity techniques can be applied in the general likelihood-free Bayesian inference setting. Analytical results on the optimal allocation of computational resources to simulations at different levels of fidelity are derived, and subsequently implemented practically. We provide an adaptive multifidelity likelihood-free inference algorithm that learns the relationships between models at different fidelities and adapts resource allocation accordingly, and demonstrate that this algorithm produces posterior estimates with near-optimal efficiency.} 975 975 abstract = {Likelihood-free Bayesian inference algorithms are popular methods for inferring the parameters of complex stochastic models with intractable likelihoods. These algorithms characteristically rely heavily on repeated model simulations. However, whenever the computational cost of simulation is even moderately expensive, the significant burden incurred by likelihood-free algorithms leaves them infeasible for many practical applications. 
The multifidelity approach has been introduced in the context of approximate Bayesian computation to reduce the simulation burden of likelihood-free inference without loss of accuracy, by using the information provided by simulating computationally cheap, approximate models in place of the model of interest. In this work we demonstrate that multifidelity techniques can be applied in the general likelihood-free Bayesian inference setting. Analytical results on the optimal allocation of computational resources to simulations at different levels of fidelity are derived, and subsequently implemented practically. We provide an adaptive multifidelity likelihood-free inference algorithm that learns the relationships between models at different fidelities and adapts resource allocation accordingly, and demonstrate that this algorithm produces posterior estimates with near-optimal efficiency.}
} 976 976 }
977 977
@article{RISTIC202030, 978 978 @article{RISTIC202030,
title = {A tutorial on uncertainty modeling for machine reasoning}, 979 979 title = {A tutorial on uncertainty modeling for machine reasoning},
journal = {Information Fusion}, 980 980 journal = {Information Fusion},
volume = {55}, 981 981 volume = {55},
pages = {30-44}, 982 982 pages = {30-44},
year = {2020}, 983 983 year = {2020},
issn = {1566-2535}, 984 984 issn = {1566-2535},
doi = {10.1016/j.inffus.2019.08.001},
url = {https://www.sciencedirect.com/science/article/pii/S1566253519301976}, 986 986 url = {https://www.sciencedirect.com/science/article/pii/S1566253519301976},
author = {Branko Ristic and Christopher Gilliam and Marion Byrne and Alessio Benavoli}, 987 987 author = {Branko Ristic and Christopher Gilliam and Marion Byrne and Alessio Benavoli},
keywords = {Information fusion, Uncertainty, Imprecision, Model based classification, Bayesian, Random sets, Belief function theory, Possibility functions, Imprecise probability}, 988 988 keywords = {Information fusion, Uncertainty, Imprecision, Model based classification, Bayesian, Random sets, Belief function theory, Possibility functions, Imprecise probability},
abstract = {Increasingly we rely on machine intelligence for reasoning and decision making under uncertainty. This tutorial reviews the prevalent methods for model-based autonomous decision making based on observations and prior knowledge, primarily in the context of classification. Both observations and the knowledge-base available for reasoning are treated as being uncertain. Accordingly, the central themes of this tutorial are quantitative modeling of uncertainty, the rules required to combine such uncertain information, and the task of decision making under uncertainty. The paper covers the main approaches to uncertain knowledge representation and reasoning, in particular, Bayesian probability theory, possibility theory, reasoning based on belief functions and finally imprecise probability theory. The main feature of the tutorial is that it illustrates various approaches with several testing scenarios, and provides MATLAB solutions for them as a supplementary material for an interested reader.} 989 989 abstract = {Increasingly we rely on machine intelligence for reasoning and decision making under uncertainty. This tutorial reviews the prevalent methods for model-based autonomous decision making based on observations and prior knowledge, primarily in the context of classification. Both observations and the knowledge-base available for reasoning are treated as being uncertain. Accordingly, the central themes of this tutorial are quantitative modeling of uncertainty, the rules required to combine such uncertain information, and the task of decision making under uncertainty. The paper covers the main approaches to uncertain knowledge representation and reasoning, in particular, Bayesian probability theory, possibility theory, reasoning based on belief functions and finally imprecise probability theory. 
The main feature of the tutorial is that it illustrates various approaches with several testing scenarios, and provides MATLAB solutions for them as a supplementary material for an interested reader.}
} 990 990 }
991 991
@article{CICIRELLO2022108619, 992 992 @article{CICIRELLO2022108619,
title = {Machine learning based optimization for interval uncertainty propagation}, 993 993 title = {Machine learning based optimization for interval uncertainty propagation},
journal = {Mechanical Systems and Signal Processing}, 994 994 journal = {Mechanical Systems and Signal Processing},
volume = {170}, 995 995 volume = {170},
pages = {108619}, 996 996 pages = {108619},
year = {2022}, 997 997 year = {2022},
issn = {0888-3270}, 998 998 issn = {0888-3270},
doi = {10.1016/j.ymssp.2021.108619},
url = {https://www.sciencedirect.com/science/article/pii/S0888327021009493}, 1000 1000 url = {https://www.sciencedirect.com/science/article/pii/S0888327021009493},
author = {Alice Cicirello and Filippo Giunta}, 1001 1001 author = {Alice Cicirello and Filippo Giunta},
keywords = {Bounded uncertainty, Bayesian optimization, Expensive-to-evaluate deterministic computer models, Gaussian process, Communicating uncertainty}, 1002 1002 keywords = {Bounded uncertainty, Bayesian optimization, Expensive-to-evaluate deterministic computer models, Gaussian process, Communicating uncertainty},
abstract = {Two non-intrusive uncertainty propagation approaches are proposed for the performance analysis of engineering systems described by expensive-to-evaluate deterministic computer models with parameters defined as interval variables. These approaches employ a machine learning based optimization strategy, the so-called Bayesian optimization, for evaluating the upper and lower bounds of a generic response variable over the set of possible responses obtained when each interval variable varies independently over its range. The lack of knowledge caused by not evaluating the response function for all the possible combinations of the interval variables is accounted for by developing a probabilistic description of the response variable itself by using a Gaussian Process regression model. An iterative procedure is developed for selecting a small number of simulations to be evaluated for updating this statistical model by using well-established acquisition functions and to assess the response bounds. In both approaches, an initial training dataset is defined. While one approach builds iteratively two distinct training datasets for evaluating separately the upper and lower bounds of the response variable, the other one builds iteratively a single training dataset. Consequently, the two approaches will produce different bound estimates at each iteration. The upper and lower response bounds are expressed as point estimates obtained from the mean function of the posterior distribution. Moreover, a confidence interval on each estimate is provided for effectively communicating to engineers when these estimates are obtained at a combination of the interval variables for which no deterministic simulation has been run. Finally, two metrics are proposed to define conditions for assessing if the predicted bound estimates can be considered satisfactory. 
The applicability of these two approaches is illustrated with two numerical applications, one focusing on vibration and the other on vibro-acoustics.} 1003 1003 abstract = {Two non-intrusive uncertainty propagation approaches are proposed for the performance analysis of engineering systems described by expensive-to-evaluate deterministic computer models with parameters defined as interval variables. These approaches employ a machine learning based optimization strategy, the so-called Bayesian optimization, for evaluating the upper and lower bounds of a generic response variable over the set of possible responses obtained when each interval variable varies independently over its range. The lack of knowledge caused by not evaluating the response function for all the possible combinations of the interval variables is accounted for by developing a probabilistic description of the response variable itself by using a Gaussian Process regression model. An iterative procedure is developed for selecting a small number of simulations to be evaluated for updating this statistical model by using well-established acquisition functions and to assess the response bounds. In both approaches, an initial training dataset is defined. While one approach builds iteratively two distinct training datasets for evaluating separately the upper and lower bounds of the response variable, the other one builds iteratively a single training dataset. Consequently, the two approaches will produce different bound estimates at each iteration. The upper and lower response bounds are expressed as point estimates obtained from the mean function of the posterior distribution. Moreover, a confidence interval on each estimate is provided for effectively communicating to engineers when these estimates are obtained at a combination of the interval variables for which no deterministic simulation has been run. 
Finally, two metrics are proposed to define conditions for assessing if the predicted bound estimates can be considered satisfactory. The applicability of these two approaches is illustrated with two numerical applications, one focusing on vibration and the other on vibro-acoustics.}
} 1004 1004 }
1005 1005
@INPROCEEDINGS{9278071, 1006 1006 @INPROCEEDINGS{9278071,
author={Petit, Maxime and Dellandrea, Emmanuel and Chen, Liming}, 1007 1007 author={Petit, Maxime and Dellandrea, Emmanuel and Chen, Liming},
booktitle={2020 Joint IEEE 10th International Conference on Development and Learning and Epigenetic Robotics (ICDL-EpiRob)}, 1008 1008 booktitle={2020 Joint IEEE 10th International Conference on Development and Learning and Epigenetic Robotics (ICDL-EpiRob)},
title={Bayesian Optimization for Developmental Robotics with Meta-Learning by Parameters Bounds Reduction}, 1009 1009 title={Bayesian Optimization for Developmental Robotics with Meta-Learning by Parameters Bounds Reduction},
year={2020}, 1010 1010 year={2020},
volume={}, 1011 1011 volume={},
number={}, 1012 1012 number={},
pages={1-8}, 1013 1013 pages={1-8},
keywords={Optimization;Robots;Task analysis;Bayes methods;Visualization;Service robots;Cognition;developmental robotics;long-term memory;meta learning;hyperparameters automatic optimization;case-based reasoning},
doi={10.1109/ICDL-EpiRob48136.2020.9278071} 1015 1015 doi={10.1109/ICDL-EpiRob48136.2020.9278071}
} 1016 1016 }
1017 1017
@article{LI2023477, 1018 1018 @article{LI2023477,
title = {Hierarchical and partitioned planning strategy for closed-loop devices in low-voltage distribution network based on improved KMeans partition method}, 1019 1019 title = {Hierarchical and partitioned planning strategy for closed-loop devices in low-voltage distribution network based on improved KMeans partition method},
journal = {Energy Reports}, 1020 1020 journal = {Energy Reports},
volume = {9}, 1021 1021 volume = {9},
pages = {477-485}, 1022 1022 pages = {477-485},
year = {2023}, 1023 1023 year = {2023},
note = {2022 The 3rd International Conference on Power and Electrical Engineering}, 1024 1024 note = {2022 The 3rd International Conference on Power and Electrical Engineering},
issn = {2352-4847}, 1025 1025 issn = {2352-4847},
doi = {10.1016/j.egyr.2023.05.161},
url = {https://www.sciencedirect.com/science/article/pii/S2352484723009137}, 1027 1027 url = {https://www.sciencedirect.com/science/article/pii/S2352484723009137},
author = {Jingqi Li and Junlin Li and Dan Wang and Chengxiong Mao and Zhitao Guan and Zhichao Liu and Miaomiao Du and Yuanzhuo Qi and Lexiang Wang and Wenge Liu and Pengfei Tang}, 1028 1028 author = {Jingqi Li and Junlin Li and Dan Wang and Chengxiong Mao and Zhitao Guan and Zhichao Liu and Miaomiao Du and Yuanzhuo Qi and Lexiang Wang and Wenge Liu and Pengfei Tang},
keywords = {Closed-loop device, Distribution network partition, Device planning, Hierarchical planning, Improved KMeans partition method}, 1029 1029 keywords = {Closed-loop device, Distribution network partition, Device planning, Hierarchical planning, Improved KMeans partition method},
abstract = {To improve the reliability of power supply, this paper proposes a hierarchical and partitioned planning strategy for closed-loop devices in low-voltage distribution network. Based on the geographic location and load situation of the distribution network area, an improved KMeans partition method is used to partition the area in the upper layer. In the lower layer, an intelligent algorithm is adopted to decide the numbers and placement locations of mobile low-voltage contact boxes and mobile seamless closed-loop load transfer devices in each partition with the goal of the highest closed-loop safety, the greatest improvement in annual power outage amount and the lowest cost. Finally, the feasibility and effectiveness of the proposed strategy are proved by an example.} 1030 1030 abstract = {To improve the reliability of power supply, this paper proposes a hierarchical and partitioned planning strategy for closed-loop devices in low-voltage distribution network. Based on the geographic location and load situation of the distribution network area, an improved KMeans partition method is used to partition the area in the upper layer. In the lower layer, an intelligent algorithm is adopted to decide the numbers and placement locations of mobile low-voltage contact boxes and mobile seamless closed-loop load transfer devices in each partition with the goal of the highest closed-loop safety, the greatest improvement in annual power outage amount and the lowest cost. Finally, the feasibility and effectiveness of the proposed strategy are proved by an example.}
} 1031 1031 }
1032 1032
@article{SAXENA2024100838, 1033 1033 @article{SAXENA2024100838,
title = {Hybrid KNN-SVM machine learning approach for solar power forecasting}, 1034 1034 title = {Hybrid KNN-SVM machine learning approach for solar power forecasting},
journal = {Environmental Challenges}, 1035 1035 journal = {Environmental Challenges},
volume = {14}, 1036 1036 volume = {14},
pages = {100838}, 1037 1037 pages = {100838},
year = {2024}, 1038 1038 year = {2024},
issn = {2667-0100}, 1039 1039 issn = {2667-0100},
doi = {10.1016/j.envc.2024.100838},
url = {https://www.sciencedirect.com/science/article/pii/S2667010024000040}, 1041 1041 url = {https://www.sciencedirect.com/science/article/pii/S2667010024000040},
author = {Nishant Saxena and Rahul Kumar and Yarrapragada K S S Rao and Dilbag Singh Mondloe and Nishikant Kishor Dhapekar and Abhishek Sharma and Anil Singh Yadav}, 1042 1042 author = {Nishant Saxena and Rahul Kumar and Yarrapragada K S S Rao and Dilbag Singh Mondloe and Nishikant Kishor Dhapekar and Abhishek Sharma and Anil Singh Yadav},
keywords = {Solar power forecasting, Hybrid model, KNN, Optimization, Solar energy, SVM}, 1043 1043 keywords = {Solar power forecasting, Hybrid model, KNN, Optimization, Solar energy, SVM},
abstract = {Predictions about solar power will have a significant impact on large-scale renewable energy plants. Photovoltaic (PV) power generation forecasting is particularly sensitive to measuring the uncertainty in weather conditions. Although several conventional techniques like long short-term memory (LSTM), support vector machine (SVM), etc. are available, but due to some restrictions, their application is limited. To enhance the precision of forecasting solar power from solar farms, a hybrid machine learning model that includes blends of the K-Nearest Neighbor (KNN) machine learning technique with the SVM to increase reliability for power system operators is proposed in this investigation. The conventional LSTM technique is also implemented to compare the performance of the proposed hybrid technique. The suggested hybrid model is improved by the use of structural diversity and data diversity in KNN and SVM, respectively. For the solar power predictions, the suggested method was tested on the Jodhpur real-time series dataset obtained from the data centers of weather stations using Meteonorm. The data set includes metrics such as Hourly Average Temperature (HAT), Hourly Total Sunlight Duration (HTSD), Hourly Total Global Solar Radiation (HTGSR), and Hourly Total Photovoltaic Energy Generation (HTPEG). The collated data has been segmented into training data, validation data, and testing data. Furthermore, the proposed technique performed better when evaluated on the three performance indices, viz., accuracy, sensitivity, and specificity. Compared with the conventional LSTM technique, the hybrid technique improved the prediction with 98\% accuracy.} 1044 1044 abstract = {Predictions about solar power will have a significant impact on large-scale renewable energy plants. Photovoltaic (PV) power generation forecasting is particularly sensitive to measuring the uncertainty in weather conditions. 
Although several conventional techniques like long short-term memory (LSTM), support vector machine (SVM), etc. are available, but due to some restrictions, their application is limited. To enhance the precision of forecasting solar power from solar farms, a hybrid machine learning model that includes blends of the K-Nearest Neighbor (KNN) machine learning technique with the SVM to increase reliability for power system operators is proposed in this investigation. The conventional LSTM technique is also implemented to compare the performance of the proposed hybrid technique. The suggested hybrid model is improved by the use of structural diversity and data diversity in KNN and SVM, respectively. For the solar power predictions, the suggested method was tested on the Jodhpur real-time series dataset obtained from the data centers of weather stations using Meteonorm. The data set includes metrics such as Hourly Average Temperature (HAT), Hourly Total Sunlight Duration (HTSD), Hourly Total Global Solar Radiation (HTGSR), and Hourly Total Photovoltaic Energy Generation (HTPEG). The collated data has been segmented into training data, validation data, and testing data. Furthermore, the proposed technique performed better when evaluated on the three performance indices, viz., accuracy, sensitivity, and specificity. Compared with the conventional LSTM technique, the hybrid technique improved the prediction with 98\% accuracy.}
} 1045 1045 }
1046 1046
@article{RAKESH2023100898, 1047 1047 @article{RAKESH2023100898,
title = {Moving object detection using modified GMM based background subtraction}, 1048 1048 title = {Moving object detection using modified GMM based background subtraction},
journal = {Measurement: Sensors}, 1049 1049 journal = {Measurement: Sensors},
volume = {30}, 1050 1050 volume = {30},
pages = {100898}, 1051 1051 pages = {100898},
year = {2023}, 1052 1052 year = {2023},
issn = {2665-9174}, 1053 1053 issn = {2665-9174},
doi = {https://doi.org/10.1016/j.measen.2023.100898}, 1054 1054 doi = {https://doi.org/10.1016/j.measen.2023.100898},
url = {https://www.sciencedirect.com/science/article/pii/S2665917423002349}, 1055 1055 url = {https://www.sciencedirect.com/science/article/pii/S2665917423002349},
author = {S. Rakesh and Nagaratna P. Hegde and M. {Venu Gopalachari} and D. Jayaram and Bhukya Madhu and Mohd Abdul Hameed and Ramdas Vankdothu and L.K. {Suresh Kumar}}, 1056 1056 author = {S. Rakesh and Nagaratna P. Hegde and M. {Venu Gopalachari} and D. Jayaram and Bhukya Madhu and Mohd Abdul Hameed and Ramdas Vankdothu and L.K. {Suresh Kumar}},
keywords = {Background subtraction, Gaussian mixture models, Intelligent video surveillance, Object detection}, 1057 1057 keywords = {Background subtraction, Gaussian mixture models, Intelligent video surveillance, Object detection},
abstract = {Academics have become increasingly interested in creating cutting-edge technologies to enhance Intelligent Video Surveillance (IVS) performance in terms of accuracy, speed, complexity, and deployment. It has been noted that precise object detection is the only way for IVS to function well in higher level applications including event interpretation, tracking, classification, and activity recognition. Through the use of cutting-edge techniques, the current study seeks to improve the performance accuracy of object detection techniques based on Gaussian Mixture Models (GMM). It is achieved by developing crucial phases in the object detecting process. In this study, it is discussed how to model each pixel as a mixture of Gaussians and how to update the model using an online k-means approximation. The adaptive mixture model's Gaussian distributions are then analyzed to identify which ones are more likely to be the product of a background process. Each pixel is categorized according to whether the background model is thought to include the Gaussian distribution that best depicts it.} 1058 1058 abstract = {Academics have become increasingly interested in creating cutting-edge technologies to enhance Intelligent Video Surveillance (IVS) performance in terms of accuracy, speed, complexity, and deployment. It has been noted that precise object detection is the only way for IVS to function well in higher level applications including event interpretation, tracking, classification, and activity recognition. Through the use of cutting-edge techniques, the current study seeks to improve the performance accuracy of object detection techniques based on Gaussian Mixture Models (GMM). It is achieved by developing crucial phases in the object detecting process. In this study, it is discussed how to model each pixel as a mixture of Gaussians and how to update the model using an online k-means approximation. 
The adaptive mixture model's Gaussian distributions are then analyzed to identify which ones are more likely to be the product of a background process. Each pixel is categorized according to whether the background model is thought to include the Gaussian distribution that best depicts it.}
} 1059 1059 }
1060 1060
@article{JIAO2022540, 1061 1061 @article{JIAO2022540,
title = {Interpretable fuzzy clustering using unsupervised fuzzy decision trees}, 1062 1062 title = {Interpretable fuzzy clustering using unsupervised fuzzy decision trees},
journal = {Information Sciences}, 1063 1063 journal = {Information Sciences},
volume = {611}, 1064 1064 volume = {611},
pages = {540-563}, 1065 1065 pages = {540-563},
year = {2022}, 1066 1066 year = {2022},
issn = {0020-0255}, 1067 1067 issn = {0020-0255},
doi = {https://doi.org/10.1016/j.ins.2022.08.077}, 1068 1068 doi = {https://doi.org/10.1016/j.ins.2022.08.077},
url = {https://www.sciencedirect.com/science/article/pii/S0020025522009872}, 1069 1069 url = {https://www.sciencedirect.com/science/article/pii/S0020025522009872},
author = {Lianmeng Jiao and Haoyu Yang and Zhun-ga Liu and Quan Pan}, 1070 1070 author = {Lianmeng Jiao and Haoyu Yang and Zhun-ga Liu and Quan Pan},
keywords = {Fuzzy clustering, Interpretable clustering, Unsupervised decision tree, Cluster merging}, 1071 1071 keywords = {Fuzzy clustering, Interpretable clustering, Unsupervised decision tree, Cluster merging},
abstract = {In clustering process, fuzzy partition performs better than hard partition when the boundaries between clusters are vague. Whereas, traditional fuzzy clustering algorithms produce less interpretable results, limiting their application in security, privacy, and ethics fields. To that end, this paper proposes an interpretable fuzzy clustering algorithm—fuzzy decision tree-based clustering which combines the flexibility of fuzzy partition with the interpretability of the decision tree. We constructed an unsupervised multi-way fuzzy decision tree to achieve the interpretability of clustering, in which each cluster is determined by one or several paths from the root to leaf nodes. The proposed algorithm comprises three main modules: feature and cutting point-selection, node fuzzy splitting, and cluster merging. The first two modules are repeated to generate an initial unsupervised decision tree, and the final module is designed to combine similar leaf nodes to form the final compact clustering model. Our algorithm optimizes an internal clustering validation metric to automatically determine the number of clusters without their initial positions. The synthetic and benchmark datasets were used to test the performance of the proposed algorithm. Furthermore, we provided two examples demonstrating its interest in solving practical problems.} 1072 1072 abstract = {In clustering process, fuzzy partition performs better than hard partition when the boundaries between clusters are vague. Whereas, traditional fuzzy clustering algorithms produce less interpretable results, limiting their application in security, privacy, and ethics fields. To that end, this paper proposes an interpretable fuzzy clustering algorithm—fuzzy decision tree-based clustering which combines the flexibility of fuzzy partition with the interpretability of the decision tree. 
We constructed an unsupervised multi-way fuzzy decision tree to achieve the interpretability of clustering, in which each cluster is determined by one or several paths from the root to leaf nodes. The proposed algorithm comprises three main modules: feature and cutting point-selection, node fuzzy splitting, and cluster merging. The first two modules are repeated to generate an initial unsupervised decision tree, and the final module is designed to combine similar leaf nodes to form the final compact clustering model. Our algorithm optimizes an internal clustering validation metric to automatically determine the number of clusters without their initial positions. The synthetic and benchmark datasets were used to test the performance of the proposed algorithm. Furthermore, we provided two examples demonstrating its interest in solving practical problems.}
} 1073 1073 }
1074 1074
@article{ARNAUGONZALEZ2023101516, 1075 1075 @article{ARNAUGONZALEZ2023101516,
title = {A methodological approach to enable natural language interaction in an Intelligent Tutoring System}, 1076 1076 title = {A methodological approach to enable natural language interaction in an Intelligent Tutoring System},
journal = {Computer Speech and Language}, 1077 1077 journal = {Computer Speech and Language},
volume = {81}, 1078 1078 volume = {81},
pages = {101516}, 1079 1079 pages = {101516},
year = {2023}, 1080 1080 year = {2023},
issn = {0885-2308}, 1081 1081 issn = {0885-2308},
doi = {https://doi.org/10.1016/j.csl.2023.101516}, 1082 1082 doi = {https://doi.org/10.1016/j.csl.2023.101516},
url = {https://www.sciencedirect.com/science/article/pii/S0885230823000359}, 1083 1083 url = {https://www.sciencedirect.com/science/article/pii/S0885230823000359},
author = {Pablo Arnau-González and Miguel Arevalillo-Herráez and Romina Albornoz-De Luise and David Arnau}, 1084 1084 author = {Pablo Arnau-González and Miguel Arevalillo-Herráez and Romina Albornoz-De Luise and David Arnau},
keywords = {Intelligent tutoring systems (ITS), Interactive learning environments (ILE), Conversational agents, Rasa, Natural language understanding (NLU), Natural language processing (NLP)}, 1085 1085 keywords = {Intelligent tutoring systems (ITS), Interactive learning environments (ILE), Conversational agents, Rasa, Natural language understanding (NLU), Natural language processing (NLP)},
abstract = {In this paper, we present and evaluate the recent incorporation of a conversational agent into an Intelligent Tutoring System (ITS), using the open-source machine learning framework Rasa. Once it has been appropriately trained, this tool is capable of identifying the intention of a given text input and extracting the relevant entities related to the message content. We describe both the generation of a realistic training set in Spanish language that enables the creation of the required Natural Language Understanding (NLU) models and the evaluation of the resulting system. For the generation of the training set, we have followed a methodology that can be easily exported to other ITS. The model evaluation shows that the conversational agent can correctly identify the majority of the user intents, reporting an f1-score above 95\%, and cooperate with the ITS to produce a consistent dialogue flow that makes interaction more natural.}
} 1087 1087 }
1088 1088
@article{MAO20224065, 1089 1089 @article{MAO20224065,
title = {An Exploratory Approach to Intelligent Quiz Question Recommendation}, 1090 1090 title = {An Exploratory Approach to Intelligent Quiz Question Recommendation},
journal = {Procedia Computer Science}, 1091 1091 journal = {Procedia Computer Science},
volume = {207}, 1092 1092 volume = {207},
pages = {4065-4074}, 1093 1093 pages = {4065-4074},
year = {2022}, 1094 1094 year = {2022},
note = {Knowledge-Based and Intelligent Information and Engineering Systems: Proceedings of the 26th International Conference KES2022}, 1095 1095 note = {Knowledge-Based and Intelligent Information and Engineering Systems: Proceedings of the 26th International Conference KES2022},
issn = {1877-0509}, 1096 1096 issn = {1877-0509},
doi = {https://doi.org/10.1016/j.procs.2022.09.469}, 1097 1097 doi = {https://doi.org/10.1016/j.procs.2022.09.469},
url = {https://www.sciencedirect.com/science/article/pii/S1877050922013631}, 1098 1098 url = {https://www.sciencedirect.com/science/article/pii/S1877050922013631},
author = {Kejie Mao and Qiwen Dong and Ye Wang and Daocheng Hong},
keywords = {question recommendation, two-sided recommender systems, reinforcement learning, intelligent tutoring}, 1100 1100 keywords = {question recommendation, two-sided recommender systems, reinforcement learning, intelligent tutoring},
abstract = {With the rapid advancement of ICT, the digital transformation on education is greatly accelerating in various applications. As a particularly prominent application of digital education, quiz question recommendation is playing a vital role in precision teaching, smart tutoring, and personalized learning. However, the looming challenge of quiz question recommender for students is to satisfy the question diversity demands for students ZPD (the zone of proximal development) stage dynamically online. Therefore, we propose to formalize quiz question recommendation with a novel approach of reinforcement learning based two-sided recommender system. We develop a recommendation framework RTR (Reinforcement-Learning based Two-sided Recommender Systems) for taking into account the interests of both sides of the system, learning and adapting to those interests in real time, and resulting in more satisfactory recommended content. This established recommendation framework captures question characters and student dynamic preferences by considering the emergence of both sides of the system, and it yields a better learning experience in the context of practical quiz question generation.} 1101 1101 abstract = {With the rapid advancement of ICT, the digital transformation on education is greatly accelerating in various applications. As a particularly prominent application of digital education, quiz question recommendation is playing a vital role in precision teaching, smart tutoring, and personalized learning. However, the looming challenge of quiz question recommender for students is to satisfy the question diversity demands for students ZPD (the zone of proximal development) stage dynamically online. Therefore, we propose to formalize quiz question recommendation with a novel approach of reinforcement learning based two-sided recommender system. 
We develop a recommendation framework RTR (Reinforcement-Learning based Two-sided Recommender Systems) for taking into account the interests of both sides of the system, learning and adapting to those interests in real time, and resulting in more satisfactory recommended content. This established recommendation framework captures question characters and student dynamic preferences by considering the emergence of both sides of the system, and it yields a better learning experience in the context of practical quiz question generation.}
} 1102 1102 }
1103 1103
@article{CLEMENTE2022118171, 1104 1104 @article{CLEMENTE2022118171,
title = {A proposal for an adaptive Recommender System based on competences and ontologies}, 1105 1105 title = {A proposal for an adaptive Recommender System based on competences and ontologies},
journal = {Expert Systems with Applications}, 1106 1106 journal = {Expert Systems with Applications},
volume = {208}, 1107 1107 volume = {208},
pages = {118171}, 1108 1108 pages = {118171},
year = {2022}, 1109 1109 year = {2022},
issn = {0957-4174}, 1110 1110 issn = {0957-4174},
doi = {https://doi.org/10.1016/j.eswa.2022.118171}, 1111 1111 doi = {https://doi.org/10.1016/j.eswa.2022.118171},
url = {https://www.sciencedirect.com/science/article/pii/S0957417422013392}, 1112 1112 url = {https://www.sciencedirect.com/science/article/pii/S0957417422013392},
author = {Julia Clemente and Héctor Yago and Javier {de Pedro-Carracedo} and Javier Bueno}, 1113 1113 author = {Julia Clemente and Héctor Yago and Javier {de Pedro-Carracedo} and Javier Bueno},
keywords = {Recommender system, Ontology network, Methodological development, Student modeling},
abstract = {Context: 1115 1115 abstract = {Context:
Competences represent an interesting pedagogical support in many processes like diagnosis or recommendation. From these, it is possible to infer information about the progress of the student to provide help targeted both, trainers who must make adaptive tutoring decisions for each learner, and students to detect and correct their learning weaknesses. For the correct development of any of these tasks, it is important to have a suitable student model that allows the representation of the most significant information possible about the student. Additionally, it would be very advantageous for this modeling to incorporate mechanisms from which it would be possible to infer more information about the student’s state of knowledge. 1116 1116 Competences represent an interesting pedagogical support in many processes like diagnosis or recommendation. From these, it is possible to infer information about the progress of the student to provide help targeted both, trainers who must make adaptive tutoring decisions for each learner, and students to detect and correct their learning weaknesses. For the correct development of any of these tasks, it is important to have a suitable student model that allows the representation of the most significant information possible about the student. Additionally, it would be very advantageous for this modeling to incorporate mechanisms from which it would be possible to infer more information about the student’s state of knowledge.
Objective: 1117 1117 Objective:
To facilitate this goal, in this paper a new approach to develop an adaptive competence-based recommender system is proposed. 1118 1118 To facilitate this goal, in this paper a new approach to develop an adaptive competence-based recommender system is proposed.
Method: 1119 1119 Method:
We present a methodological development guide as well as a set of ontological and non-ontological resources to develop and adapt the prototype of the proposed recommender system. 1120 1120 We present a methodological development guide as well as a set of ontological and non-ontological resources to develop and adapt the prototype of the proposed recommender system.
Results: 1121 1121 Results:
A modular flexible ontology network previously built for this purpose has been extended, which is responsible for recording the instructional design and student information. Furthermore, we describe a case study based on a first aid learning experience to assess the prototype with the proposed methodology. 1122 1122 A modular flexible ontology network previously built for this purpose has been extended, which is responsible for recording the instructional design and student information. Furthermore, we describe a case study based on a first aid learning experience to assess the prototype with the proposed methodology.
Conclusions: 1123 1123 Conclusions:
We highlight the relevance of flexibility and adaptability in learning modeling and recommendation processes. In order to promote improvement in the personalized learning of students, we present a Recommender System prototype taking advantages of ontologies, with a methodological guide, a broad taxonomy of recommendation criteria and the nature of competences. Future lines of research lines, including a more comprehensive evaluation of the system, will allow us to demonstrate in depth its adaptability according to the characteristics of the student, flexibility and extensibility for its integration in various environments and domains.} 1124 1124 We highlight the relevance of flexibility and adaptability in learning modeling and recommendation processes. In order to promote improvement in the personalized learning of students, we present a Recommender System prototype taking advantages of ontologies, with a methodological guide, a broad taxonomy of recommendation criteria and the nature of competences. Future lines of research lines, including a more comprehensive evaluation of the system, will allow us to demonstrate in depth its adaptability according to the characteristics of the student, flexibility and extensibility for its integration in various environments and domains.}
} 1125 1125 }
1126 1126
@article{https://doi.org/10.1155/2023/2578286, 1127 1127 @article{https://doi.org/10.1155/2023/2578286,
author = {Li, Linqing and Wang, Zhifeng}, 1128 1128 author = {Li, Linqing and Wang, Zhifeng},
title = {Knowledge Graph-Enhanced Intelligent Tutoring System Based on Exercise Representativeness and Informativeness}, 1129 1129 title = {Knowledge Graph-Enhanced Intelligent Tutoring System Based on Exercise Representativeness and Informativeness},
journal = {International Journal of Intelligent Systems}, 1130 1130 journal = {International Journal of Intelligent Systems},
volume = {2023}, 1131 1131 volume = {2023},
number = {1}, 1132 1132 number = {1},
pages = {2578286}, 1133 1133 pages = {2578286},
doi = {https://doi.org/10.1155/2023/2578286}, 1134 1134 doi = {https://doi.org/10.1155/2023/2578286},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1155/2023/2578286}, 1135 1135 url = {https://onlinelibrary.wiley.com/doi/abs/10.1155/2023/2578286},
eprint = {https://onlinelibrary.wiley.com/doi/pdf/10.1155/2023/2578286}, 1136 1136 eprint = {https://onlinelibrary.wiley.com/doi/pdf/10.1155/2023/2578286},
abstract = {In the realm of online tutoring intelligent systems, e-learners are exposed to a substantial volume of learning content. The extraction and organization of exercises and skills hold significant importance in establishing clear learning objectives and providing appropriate exercise recommendations. Presently, knowledge graph-based recommendation algorithms have garnered considerable attention among researchers. However, these algorithms solely consider knowledge graphs with single relationships and do not effectively model exercise-rich features, such as exercise representativeness and informativeness. Consequently, this paper proposes a framework, namely, the Knowledge Graph Importance-Exercise Representativeness and Informativeness Framework, to address these two issues. The framework consists of four intricate components and a novel cognitive diagnosis model called the Neural Attentive Cognitive Diagnosis model to recommend the proper exercises. These components encompass the informativeness component, exercise representation component, knowledge importance component, and exercise representativeness component. The informativeness component evaluates the informational value of each exercise and identifies the candidate exercise set (EC) that exhibits the highest exercise informativeness. Moreover, the exercise representation component utilizes a graph neural network to process student records. The output of the graph neural network serves as the input for exercise-level attention and skill-level attention, ultimately generating exercise embeddings and skill embeddings. Furthermore, the skill embeddings are employed as input for the knowledge importance component. This component transforms a one-dimensional knowledge graph into a multidimensional one through four class relations and calculates skill importance weights based on novelty and popularity. 
Subsequently, the exercise representativeness component incorporates exercise weight knowledge coverage to select exercises from the candidate exercise set for the tested exercise set. Lastly, the cognitive diagnosis model leverages exercise representation and skill importance weights to predict student performance on the test set and estimate their knowledge state. To evaluate the effectiveness of our selection strategy, extensive experiments were conducted on two types of publicly available educational datasets. The experimental results demonstrate that our framework can recommend appropriate exercises to students, leading to improved student performance.}, 1137 1137 abstract = {In the realm of online tutoring intelligent systems, e-learners are exposed to a substantial volume of learning content. The extraction and organization of exercises and skills hold significant importance in establishing clear learning objectives and providing appropriate exercise recommendations. Presently, knowledge graph-based recommendation algorithms have garnered considerable attention among researchers. However, these algorithms solely consider knowledge graphs with single relationships and do not effectively model exercise-rich features, such as exercise representativeness and informativeness. Consequently, this paper proposes a framework, namely, the Knowledge Graph Importance-Exercise Representativeness and Informativeness Framework, to address these two issues. The framework consists of four intricate components and a novel cognitive diagnosis model called the Neural Attentive Cognitive Diagnosis model to recommend the proper exercises. These components encompass the informativeness component, exercise representation component, knowledge importance component, and exercise representativeness component. The informativeness component evaluates the informational value of each exercise and identifies the candidate exercise set (EC) that exhibits the highest exercise informativeness. 
Moreover, the exercise representation component utilizes a graph neural network to process student records. The output of the graph neural network serves as the input for exercise-level attention and skill-level attention, ultimately generating exercise embeddings and skill embeddings. Furthermore, the skill embeddings are employed as input for the knowledge importance component. This component transforms a one-dimensional knowledge graph into a multidimensional one through four class relations and calculates skill importance weights based on novelty and popularity. Subsequently, the exercise representativeness component incorporates exercise weight knowledge coverage to select exercises from the candidate exercise set for the tested exercise set. Lastly, the cognitive diagnosis model leverages exercise representation and skill importance weights to predict student performance on the test set and estimate their knowledge state. To evaluate the effectiveness of our selection strategy, extensive experiments were conducted on two types of publicly available educational datasets. The experimental results demonstrate that our framework can recommend appropriate exercises to students, leading to improved student performance.},
year = {2023} 1138 1138 year = {2023}
} 1139 1139 }
1140 1140
@inproceedings{badier:hal-04092828, 1141 1141 @inproceedings{badier:hal-04092828,
TITLE = {{Comprendre les usages et effets d'un syst{\`e}me de recommandations p{\'e}dagogiques en contexte d'apprentissage non-formel}}, 1142 1142 TITLE = {{Comprendre les usages et effets d'un syst{\`e}me de recommandations p{\'e}dagogiques en contexte d'apprentissage non-formel}},
AUTHOR = {Badier, Ana{\"e}lle and Lefort, Mathieu and Lefevre, Marie}, 1143 1143 AUTHOR = {Badier, Ana{\"e}lle and Lefort, Mathieu and Lefevre, Marie},
URL = {https://hal.science/hal-04092828}, 1144 1144 URL = {https://hal.science/hal-04092828},
BOOKTITLE = {{EIAH'23}}, 1145 1145 BOOKTITLE = {{EIAH'23}},
ADDRESS = {Brest, France}, 1146 1146 ADDRESS = {Brest, France},
YEAR = {2023}, 1147 1147 YEAR = {2023},
MONTH = Jun, 1148 1148 MONTH = Jun,
HAL_ID = {hal-04092828}, 1149 1149 HAL_ID = {hal-04092828},
HAL_VERSION = {v1}, 1150 1150 HAL_VERSION = {v1},
} 1151 1151 }
1152 1152
@article{BADRA2023108920, 1153 1153 @article{BADRA2023108920,
title = {Case-based prediction – A survey}, 1154 1154 title = {Case-based prediction – A survey},
journal = {International Journal of Approximate Reasoning}, 1155 1155 journal = {International Journal of Approximate Reasoning},
volume = {158}, 1156 1156 volume = {158},
pages = {108920}, 1157 1157 pages = {108920},
year = {2023}, 1158 1158 year = {2023},
issn = {0888-613X}, 1159 1159 issn = {0888-613X},
doi = {https://doi.org/10.1016/j.ijar.2023.108920}, 1160 1160 doi = {https://doi.org/10.1016/j.ijar.2023.108920},
url = {https://www.sciencedirect.com/science/article/pii/S0888613X23000440}, 1161 1161 url = {https://www.sciencedirect.com/science/article/pii/S0888613X23000440},
author = {Fadi Badra and Marie-Jeanne Lesot}, 1162 1162 author = {Fadi Badra and Marie-Jeanne Lesot},
keywords = {Case-based prediction, Analogical transfer, Similarity}, 1163 1163 keywords = {Case-based prediction, Analogical transfer, Similarity},
abstract = {This paper clarifies the relation between case-based prediction and analogical transfer. Case-based prediction consists in predicting the outcome associated with a new case directly from its comparison with a set of cases retrieved from a case base, by relying solely on a structured memory and some similarity measures. Analogical transfer is a cognitive process that allows to derive some new information about a target situation by applying a plausible inference principle, according to which if two situations are similar with respect to some criteria, then it is plausible that they are also similar with respect to other criteria. Case-based prediction algorithms are known to apply analogical transfer to make predictions, but the existing approaches are diverse, and developing a unified theory of case-based prediction remains a challenge. In this paper, we show that a common principle underlying case-based prediction methods is that they interpret the plausible inference as a transfer of similarity knowledge from a situation space to an outcome space. Among all potential outcomes, the predicted outcome is the one that optimizes this transfer, i.e., that makes the similarities in the outcome space most compatible with the observed similarities in the situation space. Based on this observation, a systematic analysis of the different theories of case-based prediction is presented, where the approaches are distinguished according to the type of knowledge used to measure the compatibility between the two sets of similarity relations.} 1164 1164 abstract = {This paper clarifies the relation between case-based prediction and analogical transfer. Case-based prediction consists in predicting the outcome associated with a new case directly from its comparison with a set of cases retrieved from a case base, by relying solely on a structured memory and some similarity measures. 
Analogical transfer is a cognitive process that allows to derive some new information about a target situation by applying a plausible inference principle, according to which if two situations are similar with respect to some criteria, then it is plausible that they are also similar with respect to other criteria. Case-based prediction algorithms are known to apply analogical transfer to make predictions, but the existing approaches are diverse, and developing a unified theory of case-based prediction remains a challenge. In this paper, we show that a common principle underlying case-based prediction methods is that they interpret the plausible inference as a transfer of similarity knowledge from a situation space to an outcome space. Among all potential outcomes, the predicted outcome is the one that optimizes this transfer, i.e., that makes the similarities in the outcome space most compatible with the observed similarities in the situation space. Based on this observation, a systematic analysis of the different theories of case-based prediction is presented, where the approaches are distinguished according to the type of knowledge used to measure the compatibility between the two sets of similarity relations.}
} 1165 1165 }
1166 1166
1167 1167
@Article{jmse11050890,
AUTHOR = {Louvros, Panagiotis and Stefanidis, Fotios and Boulougouris, Evangelos and Komianos, Alexandros and Vassalos, Dracos}, 1169 1169 AUTHOR = {Louvros, Panagiotis and Stefanidis, Fotios and Boulougouris, Evangelos and Komianos, Alexandros and Vassalos, Dracos},
TITLE = {Machine Learning and Case-Based Reasoning for Real-Time Onboard Prediction of the Survivability of Ships}, 1170 1170 TITLE = {Machine Learning and Case-Based Reasoning for Real-Time Onboard Prediction of the Survivability of Ships},
JOURNAL = {Journal of Marine Science and Engineering}, 1171 1171 JOURNAL = {Journal of Marine Science and Engineering},
VOLUME = {11}, 1172 1172 VOLUME = {11},
YEAR = {2023}, 1173 1173 YEAR = {2023},
NUMBER = {5}, 1174 1174 NUMBER = {5},
ARTICLE-NUMBER = {890}, 1175 1175 ARTICLE-NUMBER = {890},
URL = {https://www.mdpi.com/2077-1312/11/5/890}, 1176 1176 URL = {https://www.mdpi.com/2077-1312/11/5/890},
ISSN = {2077-1312}, 1177 1177 ISSN = {2077-1312},
ABSTRACT = {The subject of damaged stability has greatly profited from the development of new tools and techniques in recent history. Specifically, the increased computational power and the probabilistic approach have transformed the subject, increasing accuracy and fidelity, hence allowing for a universal application and the inclusion of the most probable scenarios. Currently, all ships are evaluated for their stability and are expected to survive the dangers they will most likely face. However, further advancements in simulations have made it possible to further increase the fidelity and accuracy of simulated casualties. Multiple time domain and, to a lesser extent, Computational Fluid dynamics (CFD) solutions have been suggested as the next “evolutionary” step for damage stability. However, while those techniques are demonstrably more accurate, the computational power to utilize them for the task of probabilistic evaluation is not there yet. In this paper, the authors present a novel approach that aims to serve as a stopgap measure for introducing the time domain simulations in the existing framework. Specifically, the methodology presented serves the purpose of a fast decision support tool which is able to provide information regarding the ongoing casualty utilizing prior knowledge gained from simulations. This work was needed and developed for the purposes of the EU-funded project SafePASS.}, 1178 1178 ABSTRACT = {The subject of damaged stability has greatly profited from the development of new tools and techniques in recent history. Specifically, the increased computational power and the probabilistic approach have transformed the subject, increasing accuracy and fidelity, hence allowing for a universal application and the inclusion of the most probable scenarios. Currently, all ships are evaluated for their stability and are expected to survive the dangers they will most likely face. 
However, further advancements in simulations have made it possible to further increase the fidelity and accuracy of simulated casualties. Multiple time domain and, to a lesser extent, Computational Fluid dynamics (CFD) solutions have been suggested as the next “evolutionary” step for damage stability. However, while those techniques are demonstrably more accurate, the computational power to utilize them for the task of probabilistic evaluation is not there yet. In this paper, the authors present a novel approach that aims to serve as a stopgap measure for introducing the time domain simulations in the existing framework. Specifically, the methodology presented serves the purpose of a fast decision support tool which is able to provide information regarding the ongoing casualty utilizing prior knowledge gained from simulations. This work was needed and developed for the purposes of the EU-funded project SafePASS.},
DOI = {10.3390/jmse11050890} 1179 1179 DOI = {10.3390/jmse11050890}
} 1180 1180 }
1181 1181
1182 1182
@Article{su14031366, 1183 1183 @Article{su14031366,
AUTHOR = {Chun, Se-Hak and Jang, Jae-Won}, 1184 1184 AUTHOR = {Chun, Se-Hak and Jang, Jae-Won},
TITLE = {A New Trend Pattern-Matching Method of Interactive Case-Based Reasoning for Stock Price Predictions}, 1185 1185 TITLE = {A New Trend Pattern-Matching Method of Interactive Case-Based Reasoning for Stock Price Predictions},
JOURNAL = {Sustainability}, 1186 1186 JOURNAL = {Sustainability},
VOLUME = {14}, 1187 1187 VOLUME = {14},
YEAR = {2022}, 1188 1188 YEAR = {2022},
NUMBER = {3}, 1189 1189 NUMBER = {3},
ARTICLE-NUMBER = {1366}, 1190 1190 ARTICLE-NUMBER = {1366},
URL = {https://www.mdpi.com/2071-1050/14/3/1366}, 1191 1191 URL = {https://www.mdpi.com/2071-1050/14/3/1366},
ISSN = {2071-1050}, 1192 1192 ISSN = {2071-1050},
ABSTRACT = {In this paper, we suggest a new case-based reasoning method for stock price predictions using the knowledge of traders to select similar past patterns among nearest neighbors obtained from a traditional case-based reasoning machine. Thus, this method overcomes the limitation of conventional case-based reasoning, which does not consider how to retrieve similar neighbors from previous patterns in terms of a graphical pattern. In this paper, we show how the proposed method can be used when traders find similar time series patterns among nearest cases. For this, we suggest an interactive prediction system where traders can select similar patterns with individual knowledge among automatically recommended neighbors by case-based reasoning. In this paper, we demonstrate how traders can use their knowledge to select similar patterns using a graphical interface, serving as an exemplar for the target. These concepts are investigated against the backdrop of a practical application involving the prediction of three individual stock prices, i.e., Zoom, Airbnb, and Twitter, as well as the prediction of the Dow Jones Industrial Average (DJIA). The verification of the prediction results is compared with a random walk model based on the RMSE and Hit ratio. The results show that the proposed technique is more effective than the random walk model but it does not statistically surpass the random walk model.}, 1193 1193 ABSTRACT = {In this paper, we suggest a new case-based reasoning method for stock price predictions using the knowledge of traders to select similar past patterns among nearest neighbors obtained from a traditional case-based reasoning machine. Thus, this method overcomes the limitation of conventional case-based reasoning, which does not consider how to retrieve similar neighbors from previous patterns in terms of a graphical pattern. 
In this paper, we show how the proposed method can be used when traders find similar time series patterns among nearest cases. For this, we suggest an interactive prediction system where traders can select similar patterns with individual knowledge among automatically recommended neighbors by case-based reasoning. In this paper, we demonstrate how traders can use their knowledge to select similar patterns using a graphical interface, serving as an exemplar for the target. These concepts are investigated against the backdrop of a practical application involving the prediction of three individual stock prices, i.e., Zoom, Airbnb, and Twitter, as well as the prediction of the Dow Jones Industrial Average (DJIA). The verification of the prediction results is compared with a random walk model based on the RMSE and Hit ratio. The results show that the proposed technique is more effective than the random walk model but it does not statistically surpass the random walk model.},
DOI = {10.3390/su14031366} 1194 1194 DOI = {10.3390/su14031366}
} 1195 1195 }
1196 1196
@Article{fire7040107, 1197 1197 @Article{fire7040107,
AUTHOR = {Pei, Qiuyan and Jia, Zhichao and Liu, Jia and Wang, Yi and Wang, Junhui and Zhang, Yanqi}, 1198 1198 AUTHOR = {Pei, Qiuyan and Jia, Zhichao and Liu, Jia and Wang, Yi and Wang, Junhui and Zhang, Yanqi},
TITLE = {Prediction of Coal Spontaneous Combustion Hazard Grades Based on Fuzzy Clustered Case-Based Reasoning}, 1199 1199 TITLE = {Prediction of Coal Spontaneous Combustion Hazard Grades Based on Fuzzy Clustered Case-Based Reasoning},
JOURNAL = {Fire}, 1200 1200 JOURNAL = {Fire},
VOLUME = {7}, 1201 1201 VOLUME = {7},
YEAR = {2024}, 1202 1202 YEAR = {2024},
NUMBER = {4}, 1203 1203 NUMBER = {4},
ARTICLE-NUMBER = {107}, 1204 1204 ARTICLE-NUMBER = {107},
URL = {https://www.mdpi.com/2571-6255/7/4/107}, 1205 1205 URL = {https://www.mdpi.com/2571-6255/7/4/107},
ISSN = {2571-6255}, 1206 1206 ISSN = {2571-6255},
ABSTRACT = {Accurate prediction of the coal spontaneous combustion hazard grades is of great significance to ensure the safe production of coal mines. However, traditional coal temperature prediction models have low accuracy and do not predict the coal spontaneous combustion hazard grades. In order to accurately predict coal spontaneous combustion hazard grades, a prediction model of coal spontaneous combustion based on principal component analysis (PCA), case-based reasoning (CBR), fuzzy clustering (FM), and the snake optimization (SO) algorithm was proposed in this manuscript. Firstly, based on the change rule of the concentration of signature gases in the process of coal warming, a new method of classifying the risk of spontaneous combustion of coal was established. Secondly, MeanRadius-SMOTE was adopted to balance the data structure. The weights of the prediction indicators were calculated through PCA to enhance the prediction precision of the CBR model. Then, by employing FM in the case base, the computational cost of CBR was reduced and its computational efficiency was improved. The SO algorithm was used to determine the hyperparameters in the PCA-FM-CBR model. In addition, multiple comparative experiments were conducted to verify the superiority of the model proposed in this manuscript. The results indicated that SO-PCA-FM-CBR possesses good prediction performance and also improves computational efficiency. Finally, the authors of this manuscript adopted the Random Balance Designs—Fourier Amplitude Sensitivity Test (RBD-FAST) to explain the output of the model and analyzed the global importance of input variables. The results demonstrated that CO is the most important variable affecting the coal spontaneous combustion hazard grades.}, 1207 1207 ABSTRACT = {Accurate prediction of the coal spontaneous combustion hazard grades is of great significance to ensure the safe production of coal mines. 
However, traditional coal temperature prediction models have low accuracy and do not predict the coal spontaneous combustion hazard grades. In order to accurately predict coal spontaneous combustion hazard grades, a prediction model of coal spontaneous combustion based on principal component analysis (PCA), case-based reasoning (CBR), fuzzy clustering (FM), and the snake optimization (SO) algorithm was proposed in this manuscript. Firstly, based on the change rule of the concentration of signature gases in the process of coal warming, a new method of classifying the risk of spontaneous combustion of coal was established. Secondly, MeanRadius-SMOTE was adopted to balance the data structure. The weights of the prediction indicators were calculated through PCA to enhance the prediction precision of the CBR model. Then, by employing FM in the case base, the computational cost of CBR was reduced and its computational efficiency was improved. The SO algorithm was used to determine the hyperparameters in the PCA-FM-CBR model. In addition, multiple comparative experiments were conducted to verify the superiority of the model proposed in this manuscript. The results indicated that SO-PCA-FM-CBR possesses good prediction performance and also improves computational efficiency. Finally, the authors of this manuscript adopted the Random Balance Designs—Fourier Amplitude Sensitivity Test (RBD-FAST) to explain the output of the model and analyzed the global importance of input variables. The results demonstrated that CO is the most important variable affecting the coal spontaneous combustion hazard grades.},
DOI = {10.3390/fire7040107} 1208 1208 DOI = {10.3390/fire7040107}
} 1209 1209 }
1210 1210
@Article{Desmarais2012, 1211 1211 @Article{Desmarais2012,
author={Desmarais, Michel C. 1212 1212 author={Desmarais, Michel C.
and Baker, Ryan S. J. d.}, 1213 1213 and Baker, Ryan S. J. d.},
title={A review of recent advances in learner and skill modeling in intelligent learning environments}, 1214 1214 title={A review of recent advances in learner and skill modeling in intelligent learning environments},
journal={User Modeling and User-Adapted Interaction}, 1215 1215 journal={User Modeling and User-Adapted Interaction},
year={2012}, 1216 1216 year={2012},
month={Apr}, 1217 1217 month={Apr},
day={01}, 1218 1218 day={01},
volume={22}, 1219 1219 volume={22},
number={1}, 1220 1220 number={1},
pages={9-38}, 1221 1221 pages={9-38},
abstract={In recent years, learner models have emerged from the research laboratory and research classrooms into the wider world. Learner models are now embedded in real world applications which can claim to have thousands, or even hundreds of thousands, of users. Probabilistic models for skill assessment are playing a key role in these advanced learning environments. In this paper, we review the learner models that have played the largest roles in the success of these learning environments, and also the latest advances in the modeling and assessment of learner skills. We conclude by discussing related advancements in modeling other key constructs such as learner motivation, emotional and attentional state, meta-cognition and self-regulated learning, group learning, and the recent movement towards open and shared learner models.}, 1222 1222 abstract={In recent years, learner models have emerged from the research laboratory and research classrooms into the wider world. Learner models are now embedded in real world applications which can claim to have thousands, or even hundreds of thousands, of users. Probabilistic models for skill assessment are playing a key role in these advanced learning environments. In this paper, we review the learner models that have played the largest roles in the success of these learning environments, and also the latest advances in the modeling and assessment of learner skills. We conclude by discussing related advancements in modeling other key constructs such as learner motivation, emotional and attentional state, meta-cognition and self-regulated learning, group learning, and the recent movement towards open and shared learner models.},
issn={1573-1391}, 1223 1223 issn={1573-1391},
doi={10.1007/s11257-011-9106-8}, 1224 1224 doi={10.1007/s11257-011-9106-8},
url={https://doi.org/10.1007/s11257-011-9106-8} 1225 1225 url={https://doi.org/10.1007/s11257-011-9106-8}
} 1226 1226 }
1227 1227
@article{Eide, 1228 1228 @article{Eide,
title={Dynamic slate recommendation with gated recurrent units and Thompson sampling},
author={Eide, Simen and Leslie, David S. and Frigessi, Arnoldo},
journal={Data Mining and Knowledge Discovery},
language={English}, 1231 1231 language={English},
type={article}, 1232 1232 type={article},
volume = {36}, 1233 1233 volume = {36},
year = {2022}, 1234 1234 year = {2022},
issn = {1573-756X}, 1235 1235 issn = {1573-756X},
doi = {10.1007/s10618-022-00849-w},
url = {https://doi.org/10.1007/s10618-022-00849-w}, 1237 1237 url = {https://doi.org/10.1007/s10618-022-00849-w},
abstract={We consider the problem of recommending relevant content to users of an internet platform in the form of lists of items, called slates. We introduce a variational Bayesian Recurrent Neural Net recommender system that acts on time series of interactions between the internet platform and the user, and which scales to real world industrial situations. The recommender system is tested both online on real users, and on an offline dataset collected from a Norwegian web-based marketplace, FINN.no, that is made public for research. This is one of the first publicly available datasets which includes all the slates that are presented to users as well as which items (if any) in the slates were clicked on. Such a data set allows us to move beyond the common assumption that implicitly assumes that users are considering all possible items at each interaction. Instead we build our likelihood using the items that are actually in the slate, and evaluate the strengths and weaknesses of both approaches theoretically and in experiments. We also introduce a hierarchical prior for the item parameters based on group memberships. Both item parameters and user preferences are learned probabilistically. Furthermore, we combine our model with bandit strategies to ensure learning, and introduce ‘in-slate Thompson sampling’ which makes use of the slates to maximise explorative opportunities. We show experimentally that explorative recommender strategies perform on par or above their greedy counterparts. Even without making use of exploration to learn more effectively, click rates increase simply because of improved diversity in the recommended slates.} 1238 1238 abstract={We consider the problem of recommending relevant content to users of an internet platform in the form of lists of items, called slates. 
We introduce a variational Bayesian Recurrent Neural Net recommender system that acts on time series of interactions between the internet platform and the user, and which scales to real world industrial situations. The recommender system is tested both online on real users, and on an offline dataset collected from a Norwegian web-based marketplace, FINN.no, that is made public for research. This is one of the first publicly available datasets which includes all the slates that are presented to users as well as which items (if any) in the slates were clicked on. Such a data set allows us to move beyond the common assumption that implicitly assumes that users are considering all possible items at each interaction. Instead we build our likelihood using the items that are actually in the slate, and evaluate the strengths and weaknesses of both approaches theoretically and in experiments. We also introduce a hierarchical prior for the item parameters based on group memberships. Both item parameters and user preferences are learned probabilistically. Furthermore, we combine our model with bandit strategies to ensure learning, and introduce ‘in-slate Thompson sampling’ which makes use of the slates to maximise explorative opportunities. We show experimentally that explorative recommender strategies perform on par or above their greedy counterparts. Even without making use of exploration to learn more effectively, click rates increase simply because of improved diversity in the recommended slates.}
} 1239 1239 }
1240 1240
@InProceedings{10.1007/978-3-031-09680-8_14, 1241 1241 @InProceedings{10.1007/978-3-031-09680-8_14,
author={Sablayrolles, Louis 1242 1242 author={Sablayrolles, Louis
and Lefevre, Marie 1243 1243 and Lefevre, Marie
and Guin, Nathalie 1244 1244 and Guin, Nathalie
and Broisin, Julien}, 1245 1245 and Broisin, Julien},
editor={Crossley, Scott 1246 1246 editor={Crossley, Scott
and Popescu, Elvira}, 1247 1247 and Popescu, Elvira},
title={Design and Evaluation of a Competency-Based Recommendation Process}, 1248 1248 title={Design and Evaluation of a Competency-Based Recommendation Process},
booktitle={Intelligent Tutoring Systems}, 1249 1249 booktitle={Intelligent Tutoring Systems},
year={2022}, 1250 1250 year={2022},
publisher={Springer International Publishing}, 1251 1251 publisher={Springer International Publishing},
address={Cham}, 1252 1252 address={Cham},
pages={148--160}, 1253 1253 pages={148--160},
abstract={The purpose of recommending activities to learners is to provide them with resources adapted to their needs, to facilitate the learning process. However, when teachers face a large number of students, it is difficult for them to recommend a personalized list of resources to each learner. In this paper, we are interested in the design of a system that automatically recommends resources to learners using their cognitive profile expressed in terms of competencies, but also according to a specific strategy defined by teachers. Our contributions relate to (1) a competency-based pedagogical strategy allowing to express the teacher's expertise, and (2) a recommendation process based on this strategy. This process has been experimented and assessed with students learning Shell programming in a first-year computer science degree. The first results show that (i) the items selected by our system from the set of possible items were relevant according to the experts; (ii) our system provided recommendations in a reasonable time; (iii) the recommendations were consulted by the learners but lacked usability.}, 1254 1254 abstract={The purpose of recommending activities to learners is to provide them with resources adapted to their needs, to facilitate the learning process. However, when teachers face a large number of students, it is difficult for them to recommend a personalized list of resources to each learner. In this paper, we are interested in the design of a system that automatically recommends resources to learners using their cognitive profile expressed in terms of competencies, but also according to a specific strategy defined by teachers. Our contributions relate to (1) a competency-based pedagogical strategy allowing to express the teacher's expertise, and (2) a recommendation process based on this strategy. This process has been experimented and assessed with students learning Shell programming in a first-year computer science degree. 
The first results show that (i) the items selected by our system from the set of possible items were relevant according to the experts; (ii) our system provided recommendations in a reasonable time; (iii) the recommendations were consulted by the learners but lacked usability.},
isbn={978-3-031-09680-8} 1255 1255 isbn={978-3-031-09680-8}
} 1256 1256 }
1257 1257
@inproceedings{10.1145/3578337.3605122, 1258 1258 @inproceedings{10.1145/3578337.3605122,
author = {Xu, Shuyuan and Ge, Yingqiang and Li, Yunqi and Fu, Zuohui and Chen, Xu and Zhang, Yongfeng}, 1259 1259 author = {Xu, Shuyuan and Ge, Yingqiang and Li, Yunqi and Fu, Zuohui and Chen, Xu and Zhang, Yongfeng},
title = {Causal Collaborative Filtering}, 1260 1260 title = {Causal Collaborative Filtering},
year = {2023}, 1261 1261 year = {2023},
isbn = {9798400700736}, 1262 1262 isbn = {9798400700736},
publisher = {Association for Computing Machinery}, 1263 1263 publisher = {Association for Computing Machinery},
address = {New York, NY, USA}, 1264 1264 address = {New York, NY, USA},
url = {https://doi.org/10.1145/3578337.3605122}, 1265 1265 url = {https://doi.org/10.1145/3578337.3605122},
doi = {10.1145/3578337.3605122}, 1266 1266 doi = {10.1145/3578337.3605122},
abstract = {Many of the traditional recommendation algorithms are designed based on the fundamental idea of mining or learning correlative patterns from data to estimate the user-item correlative preference. However, pure correlative learning may lead to Simpson's paradox in predictions, and thus results in sacrificed recommendation performance. Simpson's paradox is a well-known statistical phenomenon, which causes confusions in statistical conclusions and ignoring the paradox may result in inaccurate decisions. Fortunately, causal and counterfactual modeling can help us to think outside of the observational data for user modeling and personalization so as to tackle such issues. In this paper, we propose Causal Collaborative Filtering (CCF) --- a general framework for modeling causality in collaborative filtering and recommendation. We provide a unified causal view of CF and mathematically show that many of the traditional CF algorithms are actually special cases of CCF under simplified causal graphs. We then propose a conditional intervention approach for do-operations so that we can estimate the user-item causal preference based on the observational data. Finally, we further propose a general counterfactual constrained learning framework for estimating the user-item preferences. Experiments are conducted on two types of real-world datasets---traditional and randomized trial data---and results show that our framework can improve the recommendation performance and reduce the Simpson's paradox problem of many CF algorithms.}, 1267 1267 abstract = {Many of the traditional recommendation algorithms are designed based on the fundamental idea of mining or learning correlative patterns from data to estimate the user-item correlative preference. However, pure correlative learning may lead to Simpson's paradox in predictions, and thus results in sacrificed recommendation performance. 
Simpson's paradox is a well-known statistical phenomenon, which causes confusions in statistical conclusions and ignoring the paradox may result in inaccurate decisions. Fortunately, causal and counterfactual modeling can help us to think outside of the observational data for user modeling and personalization so as to tackle such issues. In this paper, we propose Causal Collaborative Filtering (CCF) --- a general framework for modeling causality in collaborative filtering and recommendation. We provide a unified causal view of CF and mathematically show that many of the traditional CF algorithms are actually special cases of CCF under simplified causal graphs. We then propose a conditional intervention approach for do-operations so that we can estimate the user-item causal preference based on the observational data. Finally, we further propose a general counterfactual constrained learning framework for estimating the user-item preferences. Experiments are conducted on two types of real-world datasets---traditional and randomized trial data---and results show that our framework can improve the recommendation performance and reduce the Simpson's paradox problem of many CF algorithms.},
booktitle = {Proceedings of the 2023 ACM SIGIR International Conference on Theory of Information Retrieval}, 1268 1268 booktitle = {Proceedings of the 2023 ACM SIGIR International Conference on Theory of Information Retrieval},
pages = {235–245}, 1269 1269 pages = {235–245},
numpages = {11}, 1270 1270 numpages = {11},
keywords = {recommender systems, counterfactual reasoning, collaborative filtering, causal analysis, Simpson's paradox}, 1271 1271 keywords = {recommender systems, counterfactual reasoning, collaborative filtering, causal analysis, Simpson's paradox},
location = {Taipei, Taiwan}, 1272 1272 location = {Taipei, Taiwan},
series = {ICTIR '23} 1273 1273 series = {ICTIR '23}
} 1274 1274 }
1275 1275
@inproceedings{10.1145/3583780.3615048, 1276 1276 @inproceedings{10.1145/3583780.3615048,
author = {Zhu, Zheqing and Van Roy, Benjamin}, 1277 1277 author = {Zhu, Zheqing and Van Roy, Benjamin},
title = {Scalable Neural Contextual Bandit for Recommender Systems}, 1278 1278 title = {Scalable Neural Contextual Bandit for Recommender Systems},
year = {2023}, 1279 1279 year = {2023},
isbn = {9798400701245}, 1280 1280 isbn = {9798400701245},
publisher = {Association for Computing Machinery}, 1281 1281 publisher = {Association for Computing Machinery},
address = {New York, NY, USA}, 1282 1282 address = {New York, NY, USA},
url = {https://doi.org/10.1145/3583780.3615048}, 1283 1283 url = {https://doi.org/10.1145/3583780.3615048},
doi = {10.1145/3583780.3615048}, 1284 1284 doi = {10.1145/3583780.3615048},
abstract = {High-quality recommender systems ought to deliver both innovative and relevant content through effective and exploratory interactions with users. Yet, supervised learning-based neural networks, which form the backbone of many existing recommender systems, only leverage recognized user interests, falling short when it comes to efficiently uncovering unknown user preferences. While there has been some progress with neural contextual bandit algorithms towards enabling online exploration through neural networks, their onerous computational demands hinder widespread adoption in real-world recommender systems. In this work, we propose a scalable sample-efficient neural contextual bandit algorithm for recommender systems. To do this, we design an epistemic neural network architecture, Epistemic Neural Recommendation (ENR), that enables Thompson sampling at a large scale. In two distinct large-scale experiments with real-world tasks, ENR significantly boosts click-through rates and user ratings by at least 9\% and 6\% respectively compared to state-of-the-art neural contextual bandit algorithms. Furthermore, it achieves equivalent performance with at least 29\% fewer user interactions compared to the best-performing baseline algorithm. Remarkably, while accomplishing these improvements, ENR demands orders of magnitude fewer computational resources than neural contextual bandit baseline algorithms.}, 1285 1285 abstract = {High-quality recommender systems ought to deliver both innovative and relevant content through effective and exploratory interactions with users. Yet, supervised learning-based neural networks, which form the backbone of many existing recommender systems, only leverage recognized user interests, falling short when it comes to efficiently uncovering unknown user preferences. 
While there has been some progress with neural contextual bandit algorithms towards enabling online exploration through neural networks, their onerous computational demands hinder widespread adoption in real-world recommender systems. In this work, we propose a scalable sample-efficient neural contextual bandit algorithm for recommender systems. To do this, we design an epistemic neural network architecture, Epistemic Neural Recommendation (ENR), that enables Thompson sampling at a large scale. In two distinct large-scale experiments with real-world tasks, ENR significantly boosts click-through rates and user ratings by at least 9\% and 6\% respectively compared to state-of-the-art neural contextual bandit algorithms. Furthermore, it achieves equivalent performance with at least 29\% fewer user interactions compared to the best-performing baseline algorithm. Remarkably, while accomplishing these improvements, ENR demands orders of magnitude fewer computational resources than neural contextual bandit baseline algorithms.},
booktitle = {Proceedings of the 32nd ACM International Conference on Information and Knowledge Management}, 1286 1286 booktitle = {Proceedings of the 32nd ACM International Conference on Information and Knowledge Management},
pages = {3636–3646}, 1287 1287 pages = {3636–3646},
numpages = {11}, 1288 1288 numpages = {11},
keywords = {contextual bandits, decision making under uncertainty, exploration vs exploitation, recommender systems, reinforcement learning}, 1289 1289 keywords = {contextual bandits, decision making under uncertainty, exploration vs exploitation, recommender systems, reinforcement learning},
location = {Birmingham, United Kingdom}, 1290 1290 location = {Birmingham, United Kingdom},
series = {CIKM '23} 1291 1291 series = {CIKM '23}
} 1292 1292 }
1293 1293
@ARTICLE{10494875, 1294 1294 @ARTICLE{10494875,
author={Ghoorchian, Saeed and Kortukov, Evgenii and Maghsudi, Setareh}, 1295 1295 author={Ghoorchian, Saeed and Kortukov, Evgenii and Maghsudi, Setareh},
journal={IEEE Open Journal of Signal Processing}, 1296 1296 journal={IEEE Open Journal of Signal Processing},
title={Non-Stationary Linear Bandits With Dimensionality Reduction for Large-Scale Recommender Systems}, 1297 1297 title={Non-Stationary Linear Bandits With Dimensionality Reduction for Large-Scale Recommender Systems},
year={2024}, 1298 1298 year={2024},
volume={5}, 1299 1299 volume={5},
number={}, 1300 1300 number={},
pages={548-558}, 1301 1301 pages={548-558},
keywords={Vectors;Recommender systems;Decision making;Runtime;Signal processing algorithms;Covariance matrices;Robustness;Decision-making;multi-armed bandit;non-stationary environment;online learning;recommender systems}, 1302 1302 keywords={Vectors;Recommender systems;Decision making;Runtime;Signal processing algorithms;Covariance matrices;Robustness;Decision-making;multi-armed bandit;non-stationary environment;online learning;recommender systems},
doi={10.1109/OJSP.2024.3386490} 1303 1303 doi={10.1109/OJSP.2024.3386490}
} 1304 1304 }
1305 1305
@article{GIANNIKIS2024111752, 1306 1306 @article{GIANNIKIS2024111752,
title = {Reinforcement learning for addressing the cold-user problem in recommender systems}, 1307 1307 title = {Reinforcement learning for addressing the cold-user problem in recommender systems},
journal = {Knowledge-Based Systems}, 1308 1308 journal = {Knowledge-Based Systems},
volume = {294}, 1309 1309 volume = {294},
pages = {111752}, 1310 1310 pages = {111752},
year = {2024}, 1311 1311 year = {2024},
issn = {0950-7051}, 1312 1312 issn = {0950-7051},
doi = {10.1016/j.knosys.2024.111752},
url = {https://www.sciencedirect.com/science/article/pii/S0950705124003873}, 1314 1314 url = {https://www.sciencedirect.com/science/article/pii/S0950705124003873},
author = {Stelios Giannikis and Flavius Frasincar and David Boekestijn}, 1315 1315 author = {Stelios Giannikis and Flavius Frasincar and David Boekestijn},
keywords = {Recommender systems, Reinforcement learning, Active learning, Cold-user problem}, 1316 1316 keywords = {Recommender systems, Reinforcement learning, Active learning, Cold-user problem},
abstract = {Recommender systems are widely used in webshops because of their ability to provide users with personalized recommendations. However, the cold-user problem (i.e., recommending items to new users) is an important issue many webshops face. With the recent General Data Protection Regulation in Europe, the use of additional user information such as demographics is not possible without the user’s explicit consent. Several techniques have been proposed to solve the cold-user problem. Many of these techniques utilize Active Learning (AL) methods, which let cold users rate items to provide better recommendations for them. In this research, we propose two novel approaches that combine reinforcement learning with AL to elicit the users’ preferences and provide them with personalized recommendations. We compare reinforcement learning approaches that are either AL-based or item-based, where the latter predicts users’ ratings of an item by using their ratings of similar items. Differently than many of the existing approaches, this comparison is made based on implicit user information. Using a large real-world dataset, we show that the item-based strategy is more accurate than the AL-based strategy as well as several existing AL strategies.} 1317 1317 abstract = {Recommender systems are widely used in webshops because of their ability to provide users with personalized recommendations. However, the cold-user problem (i.e., recommending items to new users) is an important issue many webshops face. With the recent General Data Protection Regulation in Europe, the use of additional user information such as demographics is not possible without the user’s explicit consent. Several techniques have been proposed to solve the cold-user problem. Many of these techniques utilize Active Learning (AL) methods, which let cold users rate items to provide better recommendations for them. 
In this research, we propose two novel approaches that combine reinforcement learning with AL to elicit the users’ preferences and provide them with personalized recommendations. We compare reinforcement learning approaches that are either AL-based or item-based, where the latter predicts users’ ratings of an item by using their ratings of similar items. Differently than many of the existing approaches, this comparison is made based on implicit user information. Using a large real-world dataset, we show that the item-based strategy is more accurate than the AL-based strategy as well as several existing AL strategies.}
} 1318 1318 }
1319 1319
@article{IFTIKHAR2024121541, 1320 1320 @article{IFTIKHAR2024121541,
title = {A reinforcement learning recommender system using bi-clustering and Markov Decision Process}, 1321 1321 title = {A reinforcement learning recommender system using bi-clustering and Markov Decision Process},
journal = {Expert Systems with Applications}, 1322 1322 journal = {Expert Systems with Applications},
volume = {237}, 1323 1323 volume = {237},
pages = {121541}, 1324 1324 pages = {121541},
year = {2024}, 1325 1325 year = {2024},
issn = {0957-4174}, 1326 1326 issn = {0957-4174},
doi = {10.1016/j.eswa.2023.121541},
url = {https://www.sciencedirect.com/science/article/pii/S0957417423020432}, 1328 1328 url = {https://www.sciencedirect.com/science/article/pii/S0957417423020432},
author = {Arta Iftikhar and Mustansar Ali Ghazanfar and Mubbashir Ayub and Saad {Ali Alahmari} and Nadeem Qazi and Julie Wall}, 1329 1329 author = {Arta Iftikhar and Mustansar Ali Ghazanfar and Mubbashir Ayub and Saad {Ali Alahmari} and Nadeem Qazi and Julie Wall},
keywords = {Reinforcement learning, Markov Decision Process, Bi-clustering, Q-learning, Policy}, 1330 1330 keywords = {Reinforcement learning, Markov Decision Process, Bi-clustering, Q-learning, Policy},
abstract = {Collaborative filtering (CF) recommender systems are static in nature and do not adapt well to changing user preferences. User preferences may change after interaction with a system or after buying a product. 
Conventional CF clustering algorithms only identify the distribution of patterns and hidden correlations globally. However, the impossibility of discovering local patterns with these algorithms led to the popularization of bi-clustering algorithms. Bi-clustering algorithms can analyze all dataset dimensions simultaneously and consequently, discover local patterns that deliver a better understanding of the underlying hidden correlations. In this paper, we modelled the recommendation problem as a sequential decision-making problem using Markov Decision Processes (MDP). To perform state representation for MDP, we first converted the user-item voting matrix to a binary matrix. Then we performed bi-clustering on this binary matrix to determine a subset of similar rows and columns. A bi-cluster merging algorithm is designed to merge similar and overlapping bi-clusters. These bi-clusters are then mapped to a squared grid (SG). RL is applied on this SG to determine the best policy for giving recommendations to users. Start state is determined using the Improved Triangle Similarity (ITR) similarity measure. Reward function is computed as grid state overlapping in terms of users and items in current and prospective next state. A thorough comparative analysis was conducted, encompassing a diverse array of methodologies, including RL-based, pure Collaborative Filtering (CF), and clustering methods. The results demonstrate that our proposed method outperforms its competitors in terms of precision, recall, and optimal policy learning.}
} 1332 1332 }
1333 1333
@article{Soto2, 1334 1334 @article{Soto2,
author={Soto-Forero, Daniel and Ackermann, Simha and Betbeder, Marie-Laure and Henriet, Julien}, 1335 1335 author={Soto-Forero, Daniel and Ackermann, Simha and Betbeder, Marie-Laure and Henriet, Julien},
title={Automatic Real-Time Adaptation of Training Session Difficulty Using Rules and Reinforcement Learning in the AI-VT ITS}, 1336 1336 title={Automatic Real-Time Adaptation of Training Session Difficulty Using Rules and Reinforcement Learning in the AI-VT ITS},
journal = {International Journal of Modern Education and Computer Science (IJMECS)},
volume = {16}, 1338 1338 volume = {16},
pages = {56-71}, 1339 1339 pages = {56-71},
year = {2024}, 1340 1340 year = {2024},
issn = {2075-0161}, 1341 1341 issn = {2075-0161},
doi = {10.5815/ijmecs.2024.03.05},
url = {https://www.mecs-press.org/ijmecs/ijmecs-v16-n3/v16n3-5.html}, 1343 1343 url = {https://www.mecs-press.org/ijmecs/ijmecs-v16-n3/v16n3-5.html},
keywords={Real Time Adaptation, Intelligent Training System, Thompson Sampling, Case-Based Reasoning, Automatic Adaptation}, 1344 1344 keywords={Real Time Adaptation, Intelligent Training System, Thompson Sampling, Case-Based Reasoning, Automatic Adaptation},
abstract={Some of the most common and typical issues in the field of intelligent tutoring systems (ITS) are (i) the correct identification of learners’ difficulties in the learning process, (ii) the adaptation of content or presentation of the system according to the difficulties encountered, and (iii) the ability to adapt without initial data (cold-start). In some cases, the system tolerates modifications after the realization and assessment of competences. Other systems require complicated real-time adaptation since only a limited number of data can be captured. In that case, it must be analyzed properly and with a certain precision in order to obtain the appropriate adaptations. Generally, for the adaptation step, the ITS gathers common learners together and adapts their training similarly. Another type of adaptation is more personalized, but requires acquired or estimated information about each learner (previous grades, probability of success, etc.). Some of these parameters may be difficult to obtain, and others are imprecise and can lead to misleading adaptations. The adaptation using machine learning requires prior training with a lot of data. This article presents a model for the real time automatic adaptation of a predetermined session inside an ITS called AI-VT. This adaptation process is part of a case-based reasoning global model. The characteristics of the model proposed in this paper (i) require a limited number of data in order to generate a personalized adaptation, (ii) do not require training, (iii) are based on the correlation to complexity levels, and (iv) are able to adapt even at the cold-start stage. The proposed model is presented with two different configurations, deterministic and stochastic. The model has been tested with a database of 1000 learners, corresponding to different knowledge levels in three different scenarios. 
The results show the dynamic adaptation of the proposed model in both versions, with the adaptations obtained helping the system to evolve more rapidly and identify learner weaknesses in the different levels of complexity as well as the generation of pertinent recommendations in specific cases for each learner capacity.} 1345 1345 abstract={Some of the most common and typical issues in the field of intelligent tutoring systems (ITS) are (i) the correct identification of learners’ difficulties in the learning process, (ii) the adaptation of content or presentation of the system according to the difficulties encountered, and (iii) the ability to adapt without initial data (cold-start). In some cases, the system tolerates modifications after the realization and assessment of competences. Other systems require complicated real-time adaptation since only a limited number of data can be captured. In that case, it must be analyzed properly and with a certain precision in order to obtain the appropriate adaptations. Generally, for the adaptation step, the ITS gathers common learners together and adapts their training similarly. Another type of adaptation is more personalized, but requires acquired or estimated information about each learner (previous grades, probability of success, etc.). Some of these parameters may be difficult to obtain, and others are imprecise and can lead to misleading adaptations. The adaptation using machine learning requires prior training with a lot of data. This article presents a model for the real time automatic adaptation of a predetermined session inside an ITS called AI-VT. This adaptation process is part of a case-based reasoning global model. The characteristics of the model proposed in this paper (i) require a limited number of data in order to generate a personalized adaptation, (ii) do not require training, (iii) are based on the correlation to complexity levels, and (iv) are able to adapt even at the cold-start stage. 
The proposed model is presented with two different configurations, deterministic and stochastic. The model has been tested with a database of 1000 learners, corresponding to different knowledge levels in three different scenarios. The results show the dynamic adaptation of the proposed model in both versions, with the adaptations obtained helping the system to evolve more rapidly and identify learner weaknesses in the different levels of complexity as well as the generation of pertinent recommendations in specific cases for each learner capacity.}
} 1346 1346 }
1347 1347
@InProceedings{10.1007/978-3-031-63646-2_11,
author={Soto-Forero, Daniel and Betbeder, Marie-Laure and Henriet, Julien}, 1349 1349 author={Soto-Forero, Daniel and Betbeder, Marie-Laure and Henriet, Julien},
editor={Recio-Garcia, Juan A. and Orozco-del-Castillo, Mauricio G. and Bridge, Derek}, 1350 1350 editor={Recio-Garcia, Juan A. and Orozco-del-Castillo, Mauricio G. and Bridge, Derek},
title={Ensemble Stacking Case-Based Reasoning for Regression}, 1351 1351 title={Ensemble Stacking Case-Based Reasoning for Regression},
booktitle={Case-Based Reasoning Research and Development}, 1352 1352 booktitle={Case-Based Reasoning Research and Development},
year={2024}, 1353 1353 year={2024},
publisher={Springer Nature Switzerland}, 1354 1354 publisher={Springer Nature Switzerland},
address={Cham}, 1355 1355 address={Cham},
pages={159--174}, 1356 1356 pages={159--174},
abstract={This paper presents a case-based reasoning algorithm with a two-stage iterative double stacking to find approximate solutions to one and multidimensional regression problems. This approach does not require training, so it can work with dynamic data at run time. The solutions are generated using stochastic algorithms in order to allow exploration of the solution space. The evaluation is performed by transforming the regression problem into an optimization problem with an associated objective function. The algorithm has been tested in comparison with nine classical regression algorithms on ten different regression databases extracted from the UCI site. The results show that the proposed algorithm generates solutions in most cases quite close to the real solutions. According to the RMSE, the proposed algorithm is globally among the four best algorithms; according to the MAE, it is the fourth best algorithm of the ten evaluated, suggesting that the results are reasonably good.},
isbn={978-3-031-63646-2} 1358 1358 isbn={978-3-031-63646-2}
} 1359 1359 }
1360 1360
@article{ZHANG2018189, 1361 1361 @article{ZHANG2018189,
title = {A three learning states Bayesian knowledge tracing model}, 1362 1362 title = {A three learning states Bayesian knowledge tracing model},
journal = {Knowledge-Based Systems}, 1363 1363 journal = {Knowledge-Based Systems},
volume = {148}, 1364 1364 volume = {148},
pages = {189-201}, 1365 1365 pages = {189-201},
year = {2018}, 1366 1366 year = {2018},
issn = {0950-7051}, 1367 1367 issn = {0950-7051},
doi = {10.1016/j.knosys.2018.03.001},
url = {https://www.sciencedirect.com/science/article/pii/S0950705118301199}, 1369 1369 url = {https://www.sciencedirect.com/science/article/pii/S0950705118301199},
author = {Kai Zhang and Yiyu Yao}, 1370 1370 author = {Kai Zhang and Yiyu Yao},
keywords = {Bayesian knowledge tracing, Three-way decisions}, 1371 1371 keywords = {Bayesian knowledge tracing, Three-way decisions},
abstract = {This paper proposes a Bayesian knowledge tracing model with three learning states by extending the original two learning states. We divide a learning process into three sections by using an evaluation function for three-way decisions. Advantages of such a trisection over traditional bisection are demonstrated by comparative experiments. We develop a three learning states model based on the trisection of the learning process. We apply the model to a series of comparative experiments with the original model. Qualitative and quantitative analyses of the experimental results indicate the superior performance of the proposed model over the original model in terms of prediction accuracies and related statistical measures.} 1372 1372 abstract = {This paper proposes a Bayesian knowledge tracing model with three learning states by extending the original two learning states. We divide a learning process into three sections by using an evaluation function for three-way decisions. Advantages of such a trisection over traditional bisection are demonstrated by comparative experiments. We develop a three learning states model based on the trisection of the learning process. We apply the model to a series of comparative experiments with the original model. Qualitative and quantitative analyses of the experimental results indicate the superior performance of the proposed model over the original model in terms of prediction accuracies and related statistical measures.}
} 1373 1373 }
1374 1374
@article{Li_2024, 1375 1375 @article{Li_2024,
doi = {10.3847/1538-4357/ad3215}, 1376 1376 doi = {10.3847/1538-4357/ad3215},
url = {https://dx.doi.org/10.3847/1538-4357/ad3215}, 1377 1377 url = {https://dx.doi.org/10.3847/1538-4357/ad3215},
year = {2024}, 1378 1378 year = {2024},
month = {apr}, 1379 1379 month = {apr},
publisher = {The American Astronomical Society}, 1380 1380 publisher = {The American Astronomical Society},
volume = {965}, 1381 1381 volume = {965},
number = {2}, 1382 1382 number = {2},
pages = {125}, 1383 1383 pages = {125},
author = {Zhigang Li and Zhejie Ding and Yu Yu and Pengjie Zhang}, 1384 1384 author = {Zhigang Li and Zhejie Ding and Yu Yu and Pengjie Zhang},
title = {The Kullback–Leibler Divergence and the Convergence Rate of Fast Covariance Matrix Estimators in Galaxy Clustering Analysis}, 1385 1385 title = {The Kullback–Leibler Divergence and the Convergence Rate of Fast Covariance Matrix Estimators in Galaxy Clustering Analysis},
journal = {The Astrophysical Journal}, 1386 1386 journal = {The Astrophysical Journal},
abstract = {We present a method to quantify the convergence rate of the fast estimators of the covariance matrices in the large-scale structure analysis. Our method is based on the Kullback–Leibler (KL) divergence, which describes the relative entropy of two probability distributions. As a case study, we analyze the delete-d jackknife estimator for the covariance matrix of the galaxy correlation function. We introduce the information factor or the normalized KL divergence with the help of a set of baseline covariance matrices to diagnose the information contained in the jackknife covariance matrix. Using a set of quick particle mesh mock catalogs designed for the Baryon Oscillation Spectroscopic Survey DR11 CMASS galaxy survey, we find that the jackknife resampling method succeeds in recovering the covariance matrix with 10 times fewer simulation mocks than that of the baseline method at small scales (s ≤ 40 h −1 Mpc). However, the ability to reduce the number of mock catalogs is degraded at larger scales due to the increasing bias on the jackknife covariance matrix. Note that the analysis in this paper can be applied to any fast estimator of the covariance matrix for galaxy clustering measurements.} 1387 1387 abstract = {We present a method to quantify the convergence rate of the fast estimators of the covariance matrices in the large-scale structure analysis. Our method is based on the Kullback–Leibler (KL) divergence, which describes the relative entropy of two probability distributions. As a case study, we analyze the delete-d jackknife estimator for the covariance matrix of the galaxy correlation function. We introduce the information factor or the normalized KL divergence with the help of a set of baseline covariance matrices to diagnose the information contained in the jackknife covariance matrix. 
Using a set of quick particle mesh mock catalogs designed for the Baryon Oscillation Spectroscopic Survey DR11 CMASS galaxy survey, we find that the jackknife resampling method succeeds in recovering the covariance matrix with 10 times fewer simulation mocks than that of the baseline method at small scales (s ≤ 40 h −1 Mpc). However, the ability to reduce the number of mock catalogs is degraded at larger scales due to the increasing bias on the jackknife covariance matrix. Note that the analysis in this paper can be applied to any fast estimator of the covariance matrix for galaxy clustering measurements.}
} 1388 1388 }
1389 1389
@Article{Kim2024, 1390 1390 @Article{Kim2024,
author={Kim, Wonjik}, 1391 1391 author={Kim, Wonjik},
title={A Random Focusing Method with Jensen--Shannon Divergence for Improving Deep Neural Network Performance Ensuring Architecture Consistency}, 1392 1392 title={A Random Focusing Method with Jensen--Shannon Divergence for Improving Deep Neural Network Performance Ensuring Architecture Consistency},
journal={Neural Processing Letters}, 1393 1393 journal={Neural Processing Letters},
year={2024}, 1394 1394 year={2024},
month={Jun}, 1395 1395 month={Jun},
day={17}, 1396 1396 day={17},
volume={56}, 1397 1397 volume={56},
number={4}, 1398 1398 number={4},
pages={199}, 1399 1399 pages={199},
abstract={Multiple hidden layers in deep neural networks perform non-linear transformations, enabling the extraction of meaningful features and the identification of relationships between input and output data. However, the gap between the training and real-world data can result in network overfitting, prompting the exploration of various preventive methods. The regularization technique called 'dropout' is widely used for deep learning models to improve the training of robust and generalized features. During the training phase with dropout, neurons in a particular layer are randomly selected to be ignored for each input. This random exclusion of neurons encourages the network to depend on different subsets of neurons at different times, fostering robustness and reducing sensitivity to specific neurons. This study introduces a novel approach called random focusing, departing from complete neuron exclusion in dropout. The proposed random focusing selectively highlights random neurons during training, aiming for a smoother transition between training and inference phases while keeping network architecture consistent. This study also incorporates Jensen--Shannon Divergence to enhance the stability and efficacy of the random focusing method. Experimental validation across tasks like image classification and semantic segmentation demonstrates the adaptability of the proposed methods across different network architectures, including convolutional neural networks and transformers.}, 1400 1400 abstract={Multiple hidden layers in deep neural networks perform non-linear transformations, enabling the extraction of meaningful features and the identification of relationships between input and output data. However, the gap between the training and real-world data can result in network overfitting, prompting the exploration of various preventive methods. 
The regularization technique called 'dropout' is widely used for deep learning models to improve the training of robust and generalized features. During the training phase with dropout, neurons in a particular layer are randomly selected to be ignored for each input. This random exclusion of neurons encourages the network to depend on different subsets of neurons at different times, fostering robustness and reducing sensitivity to specific neurons. This study introduces a novel approach called random focusing, departing from complete neuron exclusion in dropout. The proposed random focusing selectively highlights random neurons during training, aiming for a smoother transition between training and inference phases while keeping network architecture consistent. This study also incorporates Jensen--Shannon Divergence to enhance the stability and efficacy of the random focusing method. Experimental validation across tasks like image classification and semantic segmentation demonstrates the adaptability of the proposed methods across different network architectures, including convolutional neural networks and transformers.},
issn={1573-773X}, 1401 1401 issn={1573-773X},
doi={10.1007/s11063-024-11668-z}, 1402 1402 doi={10.1007/s11063-024-11668-z},
url={https://doi.org/10.1007/s11063-024-11668-z} 1403 1403 url={https://doi.org/10.1007/s11063-024-11668-z}
} 1404 1404 }
1405 1405
@InProceedings{pmlr-v238-ou24a, 1406 1406 @InProceedings{pmlr-v238-ou24a,
title = {Thompson Sampling Itself is Differentially Private}, 1407 1407 title = {Thompson Sampling Itself is Differentially Private},
author = {Ou, Tingting and Cummings, Rachel and Avella Medina, Marco}, 1408 1408 author = {Ou, Tingting and Cummings, Rachel and Avella Medina, Marco},
booktitle = {Proceedings of The 27th International Conference on Artificial Intelligence and Statistics}, 1409 1409 booktitle = {Proceedings of The 27th International Conference on Artificial Intelligence and Statistics},
pages = {1576--1584}, 1410 1410 pages = {1576--1584},
year = {2024}, 1411 1411 year = {2024},
editor = {Dasgupta, Sanjoy and Mandt, Stephan and Li, Yingzhen}, 1412 1412 editor = {Dasgupta, Sanjoy and Mandt, Stephan and Li, Yingzhen},
volume = {238}, 1413 1413 volume = {238},
series = {Proceedings of Machine Learning Research}, 1414 1414 series = {Proceedings of Machine Learning Research},
month = {02--04 May}, 1415 1415 month = {02--04 May},
publisher = {PMLR}, 1416 1416 publisher = {PMLR},
pdf = {https://proceedings.mlr.press/v238/ou24a/ou24a.pdf}, 1417 1417 pdf = {https://proceedings.mlr.press/v238/ou24a/ou24a.pdf},
url = {https://proceedings.mlr.press/v238/ou24a.html}, 1418 1418 url = {https://proceedings.mlr.press/v238/ou24a.html},
abstract = {In this work we first show that the classical Thompson sampling algorithm for multi-arm bandits is differentially private as-is, without any modification. We provide per-round privacy guarantees as a function of problem parameters and show composition over $T$ rounds; since the algorithm is unchanged, existing $O(\sqrt{NT\log N})$ regret bounds still hold and there is no loss in performance due to privacy. We then show that simple modifications – such as pre-pulling all arms a fixed number of times, increasing the sampling variance – can provide tighter privacy guarantees. We again provide privacy guarantees that now depend on the new parameters introduced in the modification, which allows the analyst to tune the privacy guarantee as desired. We also provide a novel regret analysis for this new algorithm, and show how the new parameters also impact expected regret. Finally, we empirically validate and illustrate our theoretical findings in two parameter regimes and demonstrate that tuning the new parameters substantially improve the privacy-regret tradeoff.} 1419 1419 abstract = {In this work we first show that the classical Thompson sampling algorithm for multi-arm bandits is differentially private as-is, without any modification. We provide per-round privacy guarantees as a function of problem parameters and show composition over $T$ rounds; since the algorithm is unchanged, existing $O(\sqrt{NT\log N})$ regret bounds still hold and there is no loss in performance due to privacy. We then show that simple modifications – such as pre-pulling all arms a fixed number of times, increasing the sampling variance – can provide tighter privacy guarantees. We again provide privacy guarantees that now depend on the new parameters introduced in the modification, which allows the analyst to tune the privacy guarantee as desired. We also provide a novel regret analysis for this new algorithm, and show how the new parameters also impact expected regret. 
Finally, we empirically validate and illustrate our theoretical findings in two parameter regimes and demonstrate that tuning the new parameters substantially improve the privacy-regret tradeoff.}
} 1420 1420 }
1421 1421
@Article{math12111758, 1422 1422 @Article{math12111758,
AUTHOR = {Uguina, Antonio R. and Gomez, Juan F. and Panadero, Javier and Martínez-Gavara, Anna and Juan, Angel A.}, 1423 1423 AUTHOR = {Uguina, Antonio R. and Gomez, Juan F. and Panadero, Javier and Martínez-Gavara, Anna and Juan, Angel A.},
TITLE = {A Learnheuristic Algorithm Based on Thompson Sampling for the Heterogeneous and Dynamic Team Orienteering Problem}, 1424 1424 TITLE = {A Learnheuristic Algorithm Based on Thompson Sampling for the Heterogeneous and Dynamic Team Orienteering Problem},
JOURNAL = {Mathematics}, 1425 1425 JOURNAL = {Mathematics},
VOLUME = {12}, 1426 1426 VOLUME = {12},
YEAR = {2024}, 1427 1427 YEAR = {2024},
NUMBER = {11}, 1428 1428 NUMBER = {11},
ARTICLE-NUMBER = {1758}, 1429 1429 ARTICLE-NUMBER = {1758},
URL = {https://www.mdpi.com/2227-7390/12/11/1758}, 1430 1430 URL = {https://www.mdpi.com/2227-7390/12/11/1758},
ISSN = {2227-7390}, 1431 1431 ISSN = {2227-7390},
ABSTRACT = {The team orienteering problem (TOP) is a well-studied optimization challenge in the field of Operations Research, where multiple vehicles aim to maximize the total collected rewards within a given time limit by visiting a subset of nodes in a network. With the goal of including dynamic and uncertain conditions inherent in real-world transportation scenarios, we introduce a novel dynamic variant of the TOP that considers real-time changes in environmental conditions affecting reward acquisition at each node. Specifically, we model the dynamic nature of environmental factors—such as traffic congestion, weather conditions, and battery level of each vehicle—to reflect their impact on the probability of obtaining the reward when visiting each type of node in a heterogeneous network. To address this problem, a learnheuristic optimization framework is proposed. It combines a metaheuristic algorithm with Thompson sampling to make informed decisions in dynamic environments. Furthermore, we conduct empirical experiments to assess the impact of varying reward probabilities on resource allocation and route planning within the context of this dynamic TOP, where nodes might offer a different reward behavior depending upon the environmental conditions. Our numerical results indicate that the proposed learnheuristic algorithm outperforms static approaches, achieving up to 25% better performance in highly dynamic scenarios. Our findings highlight the effectiveness of our approach in adapting to dynamic conditions and optimizing decision-making processes in transportation systems.}, 1432 1432 ABSTRACT = {The team orienteering problem (TOP) is a well-studied optimization challenge in the field of Operations Research, where multiple vehicles aim to maximize the total collected rewards within a given time limit by visiting a subset of nodes in a network. 
With the goal of including dynamic and uncertain conditions inherent in real-world transportation scenarios, we introduce a novel dynamic variant of the TOP that considers real-time changes in environmental conditions affecting reward acquisition at each node. Specifically, we model the dynamic nature of environmental factors—such as traffic congestion, weather conditions, and battery level of each vehicle—to reflect their impact on the probability of obtaining the reward when visiting each type of node in a heterogeneous network. To address this problem, a learnheuristic optimization framework is proposed. It combines a metaheuristic algorithm with Thompson sampling to make informed decisions in dynamic environments. Furthermore, we conduct empirical experiments to assess the impact of varying reward probabilities on resource allocation and route planning within the context of this dynamic TOP, where nodes might offer a different reward behavior depending upon the environmental conditions. Our numerical results indicate that the proposed learnheuristic algorithm outperforms static approaches, achieving up to 25% better performance in highly dynamic scenarios. Our findings highlight the effectiveness of our approach in adapting to dynamic conditions and optimizing decision-making processes in transportation systems.},
DOI = {10.3390/math12111758} 1433 1433 DOI = {10.3390/math12111758}
} 1434 1434 }
1435 1435
@inproceedings{NEURIPS2023_9d8cf124, 1436 1436 @inproceedings{NEURIPS2023_9d8cf124,
author = {Abel, David and Barreto, Andre and Van Roy, Benjamin and Precup, Doina and van Hasselt, Hado P and Singh, Satinder}, 1437 1437 author = {Abel, David and Barreto, Andre and Van Roy, Benjamin and Precup, Doina and van Hasselt, Hado P and Singh, Satinder},
booktitle = {Advances in Neural Information Processing Systems}, 1438 1438 booktitle = {Advances in Neural Information Processing Systems},
editor = {A. Oh and T. Naumann and A. Globerson and K. Saenko and M. Hardt and S. Levine}, 1439 1439 editor = {A. Oh and T. Naumann and A. Globerson and K. Saenko and M. Hardt and S. Levine},
pages = {50377--50407}, 1440 1440 pages = {50377--50407},
publisher = {Curran Associates, Inc.}, 1441 1441 publisher = {Curran Associates, Inc.},
title = {A Definition of Continual Reinforcement Learning}, 1442 1442 title = {A Definition of Continual Reinforcement Learning},
url = {https://proceedings.neurips.cc/paper_files/paper/2023/file/9d8cf1247786d6dfeefeeb53b8b5f6d7-Paper-Conference.pdf}, 1443 1443 url = {https://proceedings.neurips.cc/paper_files/paper/2023/file/9d8cf1247786d6dfeefeeb53b8b5f6d7-Paper-Conference.pdf},
volume = {36}, 1444 1444 volume = {36},
year = {2023} 1445 1445 year = {2023}
} 1446 1446 }
1447 1447
@article{NGUYEN2024111566, 1448 1448 @article{NGUYEN2024111566,
title = {Dynamic metaheuristic selection via Thompson Sampling for online optimization}, 1449 1449 title = {Dynamic metaheuristic selection via Thompson Sampling for online optimization},
journal = {Applied Soft Computing}, 1450 1450 journal = {Applied Soft Computing},
volume = {158}, 1451 1451 volume = {158},
pages = {111566}, 1452 1452 pages = {111566},
year = {2024}, 1453 1453 year = {2024},
issn = {1568-4946}, 1454 1454 issn = {1568-4946},
doi = {https://doi.org/10.1016/j.asoc.2024.111566}, 1455 1455 doi = {https://doi.org/10.1016/j.asoc.2024.111566},
url = {https://www.sciencedirect.com/science/article/pii/S1568494624003405}, 1456 1456 url = {https://www.sciencedirect.com/science/article/pii/S1568494624003405},
author = {Alain Nguyen}, 1457 1457 author = {Alain Nguyen},
keywords = {Selection hyper-heuristic, Multi-armed-bandit, Thompson Sampling, Online optimization}, 1458 1458 keywords = {Selection hyper-heuristic, Multi-armed-bandit, Thompson Sampling, Online optimization},
abstract = {It is acknowledged that no single heuristic can outperform all the others in every optimization problem. This has given rise to hyper-heuristic methods for providing solutions to a wider range of problems. In this work, a set of five non-competing low-level heuristics is proposed in a hyper-heuristic framework. The multi-armed bandit problem analogy is efficiently leveraged and Thompson Sampling is used to actively select the best heuristic for online optimization. The proposed method is compared against ten population-based metaheuristic algorithms on the well-known CEC’05 optimizing benchmark consisting of 23 functions of various landscapes. The results show that the proposed algorithm is the only one able to find the global minimum of all functions with remarkable consistency.} 1459 1459 abstract = {It is acknowledged that no single heuristic can outperform all the others in every optimization problem. This has given rise to hyper-heuristic methods for providing solutions to a wider range of problems. In this work, a set of five non-competing low-level heuristics is proposed in a hyper-heuristic framework. The multi-armed bandit problem analogy is efficiently leveraged and Thompson Sampling is used to actively select the best heuristic for online optimization. The proposed method is compared against ten population-based metaheuristic algorithms on the well-known CEC’05 optimizing benchmark consisting of 23 functions of various landscapes. The results show that the proposed algorithm is the only one able to find the global minimum of all functions with remarkable consistency.}
} 1460 1460 }
1461 1461
@Article{Malladi2024, 1462 1462 @Article{Malladi2024,
author={Malladi, Rama K.}, 1463 1463 author={Malladi, Rama K.},
title={Application of Supervised Machine Learning Techniques to Forecast the COVID-19 U.S. Recession and Stock Market Crash}, 1464 1464 title={Application of Supervised Machine Learning Techniques to Forecast the COVID-19 U.S. Recession and Stock Market Crash},
journal={Computational Economics}, 1465 1465 journal={Computational Economics},
year={2024}, 1466 1466 year={2024},
month={Mar}, 1467 1467 month={Mar},
day={01}, 1468 1468 day={01},
volume={63}, 1469 1469 volume={63},
number={3}, 1470 1470 number={3},
pages={1021--1045},
abstract={Machine learning (ML), a transformational technology, has been successfully applied to forecasting events down the road. This paper demonstrates that supervised ML techniques can be used in recession and stock market crash (more than 20{\%} drawdown) forecasting. After learning from strictly past monthly data, ML algorithms detected the Covid-19 recession by December 2019, six months before the official NBER announcement. Moreover, ML algorithms foresaw the March 2020 S{\&}P500 crash two months before it happened. The current labor market and housing are harbingers of a future U.S. recession (in 3 months). Financial factors have a bigger role to play in stock market crashes than economic factors. The labor market appears as a top-two feature in predicting both recessions and crashes. ML algorithms detect that the U.S. exited recession before December 2020, even though the official NBER announcement has not yet been made. They also do not anticipate a U.S. stock market crash before March 2021. ML methods have three times higher false discovery rates of recessions compared to crashes.}, 1472 1472 abstract={Machine learning (ML), a transformational technology, has been successfully applied to forecasting events down the road. This paper demonstrates that supervised ML techniques can be used in recession and stock market crash (more than 20{\%} drawdown) forecasting. After learning from strictly past monthly data, ML algorithms detected the Covid-19 recession by December 2019, six months before the official NBER announcement. Moreover, ML algorithms foresaw the March 2020 S{\&}P500 crash two months before it happened. The current labor market and housing are harbingers of a future U.S. recession (in 3 months). Financial factors have a bigger role to play in stock market crashes than economic factors. The labor market appears as a top-two feature in predicting both recessions and crashes. ML algorithms detect that the U.S. 
exited recession before December 2020, even though the official NBER announcement has not yet been made. They also do not anticipate a U.S. stock market crash before March 2021. ML methods have three times higher false discovery rates of recessions compared to crashes.},
issn={1572-9974}, 1473 1473 issn={1572-9974},
doi={10.1007/s10614-022-10333-8}, 1474 1474 doi={10.1007/s10614-022-10333-8},
url={https://doi.org/10.1007/s10614-022-10333-8} 1475 1475 url={https://doi.org/10.1007/s10614-022-10333-8}
} 1476 1476 }
1477 1477
@INPROCEEDINGS{10493943, 1478 1478 @INPROCEEDINGS{10493943,
  author={R. Subha and N. Gayathri and S. Sasireka and R. Sathiyabanu and B. Santhiyaa and B. Varshini},
booktitle={2024 5th International Conference on Mobile Computing and Sustainable Informatics (ICMCSI)}, 1480 1480 booktitle={2024 5th International Conference on Mobile Computing and Sustainable Informatics (ICMCSI)},
title={Intelligent Tutoring Systems using Long Short-Term Memory Networks and Bayesian Knowledge Tracing}, 1481 1481 title={Intelligent Tutoring Systems using Long Short-Term Memory Networks and Bayesian Knowledge Tracing},
year={2024}, 1482 1482 year={2024},
volume={0}, 1483 1483 volume={0},
number={0}, 1484 1484 number={0},
pages={24--29},
keywords={Knowledge engineering;Filtering;Estimation;Transforms;Real-time systems;Bayes methods;Problem-solving;Intelligent Tutoring System (ITS);Long Short-Term Memory (LSTM);Bayesian Knowledge Tracing (BKT);Reinforcement Learning}, 1486 1486 keywords={Knowledge engineering;Filtering;Estimation;Transforms;Real-time systems;Bayes methods;Problem-solving;Intelligent Tutoring System (ITS);Long Short-Term Memory (LSTM);Bayesian Knowledge Tracing (BKT);Reinforcement Learning},
doi={10.1109/ICMCSI61536.2024.00010} 1487 1487 doi={10.1109/ICMCSI61536.2024.00010}
} 1488 1488 }
1489 1489
@article{https://doi.org/10.1155/2024/4067721, 1490 1490 @article{https://doi.org/10.1155/2024/4067721,
author = {Ahmed, Esmael}, 1491 1491 author = {Ahmed, Esmael},
title = {Student Performance Prediction Using Machine Learning Algorithms}, 1492 1492 title = {Student Performance Prediction Using Machine Learning Algorithms},
journal = {Applied Computational Intelligence and Soft Computing}, 1493 1493 journal = {Applied Computational Intelligence and Soft Computing},
volume = {2024}, 1494 1494 volume = {2024},
number = {1}, 1495 1495 number = {1},
pages = {4067721}, 1496 1496 pages = {4067721},
doi = {https://doi.org/10.1155/2024/4067721}, 1497 1497 doi = {https://doi.org/10.1155/2024/4067721},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1155/2024/4067721}, 1498 1498 url = {https://onlinelibrary.wiley.com/doi/abs/10.1155/2024/4067721},
eprint = {https://onlinelibrary.wiley.com/doi/pdf/10.1155/2024/4067721}, 1499 1499 eprint = {https://onlinelibrary.wiley.com/doi/pdf/10.1155/2024/4067721},
abstract = {Education is crucial for a productive life and providing necessary resources. With the advent of technology like artificial intelligence, higher education institutions are incorporating technology into traditional teaching methods. Predicting academic success has gained interest in education as a strong academic record improves a university’s ranking and increases student employment opportunities. Modern learning institutions face challenges in analyzing performance, providing high-quality education, formulating strategies for evaluating students’ performance, and identifying future needs. E-learning is a rapidly growing and advanced form of education, where students enroll in online courses. Platforms like Intelligent Tutoring Systems (ITS), learning management systems (LMS), and massive open online courses (MOOC) use educational data mining (EDM) to develop automatic grading systems, recommenders, and adaptative systems. However, e-learning is still considered a challenging learning environment due to the lack of direct interaction between students and course instructors. Machine learning (ML) is used in developing adaptive intelligent systems that can perform complex tasks beyond human abilities. Some areas of applications of ML algorithms include cluster analysis, pattern recognition, image processing, natural language processing, and medical diagnostics. In this research work, K-means, a clustering data mining technique using Davies’ Bouldin method, obtains clusters to find important features affecting students’ performance. The study found that the SVM algorithm had the best prediction results after parameter adjustment, with a 96\% accuracy rate. In this paper, the researchers have examined the functions of the Support Vector Machine, Decision Tree, naive Bayes, and KNN classifiers. The outcomes of parameter adjustment greatly increased the accuracy of the four prediction models. 
Naïve Bayes model’s prediction accuracy is the lowest when compared to other prediction methods, as it assumes a strong independent relationship between features.}, 1500 1500 abstract = {Education is crucial for a productive life and providing necessary resources. With the advent of technology like artificial intelligence, higher education institutions are incorporating technology into traditional teaching methods. Predicting academic success has gained interest in education as a strong academic record improves a university’s ranking and increases student employment opportunities. Modern learning institutions face challenges in analyzing performance, providing high-quality education, formulating strategies for evaluating students’ performance, and identifying future needs. E-learning is a rapidly growing and advanced form of education, where students enroll in online courses. Platforms like Intelligent Tutoring Systems (ITS), learning management systems (LMS), and massive open online courses (MOOC) use educational data mining (EDM) to develop automatic grading systems, recommenders, and adaptative systems. However, e-learning is still considered a challenging learning environment due to the lack of direct interaction between students and course instructors. Machine learning (ML) is used in developing adaptive intelligent systems that can perform complex tasks beyond human abilities. Some areas of applications of ML algorithms include cluster analysis, pattern recognition, image processing, natural language processing, and medical diagnostics. In this research work, K-means, a clustering data mining technique using Davies’ Bouldin method, obtains clusters to find important features affecting students’ performance. The study found that the SVM algorithm had the best prediction results after parameter adjustment, with a 96\% accuracy rate. 
In this paper, the researchers have examined the functions of the Support Vector Machine, Decision Tree, naive Bayes, and KNN classifiers. The outcomes of parameter adjustment greatly increased the accuracy of the four prediction models. Naïve Bayes model’s prediction accuracy is the lowest when compared to other prediction methods, as it assumes a strong independent relationship between features.},
year = {2024} 1501 1501 year = {2024}
} 1502 1502 }
1503 1503
@article{HAZEM, 1504 1504 @article{HAZEM,
author = {Hazem A. Alrakhawi and Nurullizam Jamiat and Samy S. Abu-Naser}, 1505 1505 author = {Hazem A. Alrakhawi and Nurullizam Jamiat and Samy S. Abu-Naser},
title = {Intelligent Tutoring Systems in education: A systematic review of usage, tools, effects and evaluation}, 1506 1506 title = {Intelligent Tutoring Systems in education: A systematic review of usage, tools, effects and evaluation},
journal = {Journal of Theoretical and Applied Information Technology}, 1507 1507 journal = {Journal of Theoretical and Applied Information Technology},
volume = {101},
number = {4},
pages = {1205--1226},
year = {2023} 1514 1514 year = {2023}
} 1515 1515 }
1516 1516
@Article{Liu2023, 1517 1517 @Article{Liu2023,
author={Liu, Mengchi 1518 1518 author={Liu, Mengchi
and Yu, Dongmei}, 1519 1519 and Yu, Dongmei},
title={Towards intelligent E-learning systems}, 1520 1520 title={Towards intelligent E-learning systems},
journal={Education and Information Technologies}, 1521 1521 journal={Education and Information Technologies},
year={2023}, 1522 1522 year={2023},
month={Jul}, 1523 1523 month={Jul},
day={01}, 1524 1524 day={01},
volume={28}, 1525 1525 volume={28},
number={7}, 1526 1526 number={7},
pages={7845--7876},
abstract={The prevalence of e-learning systems has made educational resources more accessible, interactive and effective to learners without the geographic and temporal boundaries. However, as the number of users increases and the volume of data grows, current e-learning systems face some technical and pedagogical challenges. This paper provides a comprehensive review on the efforts of applying new information and communication technologies to improve e-learning services. We first systematically investigate current e-learning systems in terms of their classification, architecture, functions, challenges, and current trends. We then present a general architecture for big data based e-learning systems to meet the ever-growing demand for e-learning. We also describe how to use data generated in big data based e-learning systems to support more flexible and customized course delivery and personalized learning.}, 1528 1528 abstract={The prevalence of e-learning systems has made educational resources more accessible, interactive and effective to learners without the geographic and temporal boundaries. However, as the number of users increases and the volume of data grows, current e-learning systems face some technical and pedagogical challenges. This paper provides a comprehensive review on the efforts of applying new information and communication technologies to improve e-learning services. We first systematically investigate current e-learning systems in terms of their classification, architecture, functions, challenges, and current trends. We then present a general architecture for big data based e-learning systems to meet the ever-growing demand for e-learning. We also describe how to use data generated in big data based e-learning systems to support more flexible and customized course delivery and personalized learning.},
issn={1573-7608}, 1529 1529 issn={1573-7608},
doi={10.1007/s10639-022-11479-6}, 1530 1530 doi={10.1007/s10639-022-11479-6},
url={https://doi.org/10.1007/s10639-022-11479-6} 1531 1531 url={https://doi.org/10.1007/s10639-022-11479-6}
} 1532 1532 }
1533 1533
@InProceedings{10.1007/978-3-031-63646-2_13, 1534 1534 @InProceedings{10.1007/978-3-031-63646-2_13,
author="Soto-Forero, Daniel 1535 1535 author="Soto-Forero, Daniel
and Ackermann, Simha 1536 1536 and Ackermann, Simha
and Betbeder, Marie-Laure 1537 1537 and Betbeder, Marie-Laure
and Henriet, Julien", 1538 1538 and Henriet, Julien",
editor="Recio-Garcia, Juan A. 1539 1539 editor="Recio-Garcia, Juan A.
and Orozco-del-Castillo, Mauricio G. 1540 1540 and Orozco-del-Castillo, Mauricio G.
and Bridge, Derek", 1541 1541 and Bridge, Derek",
title="The Intelligent Tutoring System AI-VT with Case-Based Reasoning and Real Time Recommender Models", 1542 1542 title="The Intelligent Tutoring System AI-VT with Case-Based Reasoning and Real Time Recommender Models",
booktitle="Case-Based Reasoning Research and Development", 1543 1543 booktitle="Case-Based Reasoning Research and Development",
year="2024", 1544 1544 year="2024",
publisher="Springer Nature Switzerland", 1545 1545 publisher="Springer Nature Switzerland",
address="Cham", 1546 1546 address="Cham",
pages="191--205", 1547 1547 pages="191--205",
abstract="This paper presents a recommendation model coupled on an existing CBR system model through a new modular architecture designed to integrate multiple services in a learning system called AI-VT (Artificial Intelligence Training System). The recommendation model provides a semi-automatic review of the CBR, two variants of the recommendation model have been implemented: deterministic and stochastic. The model has been tested with 1000 simulated learners, and compared with an original CBR system and BKT (Bayesian Knowledge Tracing) recommender system. The results show that the proposed model identifies learners' weaknesses correctly and revises the content of the ITS (Intelligent Tutoring System) better than the original ITS with CBR. Compared to BKT, the results at each level of complexity are variable, but overall the proposed stochastic model obtains better results.", 1548 1548 abstract="This paper presents a recommendation model coupled on an existing CBR system model through a new modular architecture designed to integrate multiple services in a learning system called AI-VT (Artificial Intelligence Training System). The recommendation model provides a semi-automatic review of the CBR, two variants of the recommendation model have been implemented: deterministic and stochastic. The model has been tested with 1000 simulated learners, and compared with an original CBR system and BKT (Bayesian Knowledge Tracing) recommender system. The results show that the proposed model identifies learners' weaknesses correctly and revises the content of the ITS (Intelligent Tutoring System) better than the original ITS with CBR. Compared to BKT, the results at each level of complexity are variable, but overall the proposed stochastic model obtains better results.",
isbn="978-3-031-63646-2" 1549 1549 isbn="978-3-031-63646-2"
} 1550 1550 }
1551 1551
@article{doi:10.1137/23M1592420, 1552 1552 @article{doi:10.1137/23M1592420,
author = {Minsker, Stanislav and Strawn, Nate}, 1553 1553 author = {Minsker, Stanislav and Strawn, Nate},
title = {The Geometric Median and Applications to Robust Mean Estimation}, 1554 1554 title = {The Geometric Median and Applications to Robust Mean Estimation},
journal = {SIAM Journal on Mathematics of Data Science}, 1555 1555 journal = {SIAM Journal on Mathematics of Data Science},
volume = {6}, 1556 1556 volume = {6},
number = {2}, 1557 1557 number = {2},
pages = {504--533},
year = {2024}, 1559 1559 year = {2024},
doi = {10.1137/23M1592420}, 1560 1560 doi = {10.1137/23M1592420},
URL = { https://doi.org/10.1137/23M1592420}, 1561 1561 URL = { https://doi.org/10.1137/23M1592420},
eprint = {https://doi.org/10.1137/23M1592420}, 1562 1562 eprint = {https://doi.org/10.1137/23M1592420},
abstract = {This paper is devoted to the statistical and numerical properties of the geometric median and its applications to the problem of robust mean estimation via the median of means principle. 
Our main theoretical results include (a) an upper bound for the distance between the mean and the median for general absolutely continuous distributions in \(\mathbb R^d\), and examples of specific classes of distributions for which these bounds do not depend on the ambient dimension \(d\); (b) exponential deviation inequalities for the distance between the sample and the population versions of the geometric median, which again depend only on the trace-type quantities and not on the ambient dimension. As a corollary, we deduce improved bounds for the (geometric) median of means estimator that hold for large classes of heavy-tailed distributions. Finally, we address the error of numerical approximation, which is an important practical aspect of any statistical estimation procedure. We demonstrate that the objective function minimized by the geometric median satisfies a “local quadratic growth” condition that allows one to translate suboptimality bounds for the objective function to the corresponding bounds for the numerical approximation to the median itself and propose a simple stopping rule applicable to any optimization method which yields explicit error guarantees. We conclude with the numerical experiments, including the application to estimation of mean values of log-returns for S\&P 500 data. }
} 1564 1564 }
1565 1565
@article{lei2024analysis, 1566 1566 @article{lei2024analysis,
title={Analysis of Simpson’s Paradox and Its Applications}, 1567 1567 title={Analysis of Simpson’s Paradox and Its Applications},
author={Lei, Zhihao}, 1568 1568 author={Lei, Zhihao},
journal={Highlights in Science, Engineering and Technology}, 1569 1569 journal={Highlights in Science, Engineering and Technology},
volume={88}, 1570 1570 volume={88},
pages={357--362}, 1571 1571 pages={357--362},
year={2024} 1572 1572 year={2024}
} 1573 1573 }
1574 1574
@InProceedings{pmlr-v108-seznec20a, 1575 1575 @InProceedings{pmlr-v108-seznec20a,
title = {A single algorithm for both restless and rested rotting bandits}, 1576 1576 title = {A single algorithm for both restless and rested rotting bandits},
author = {Seznec, Julien and Menard, Pierre and Lazaric, Alessandro and Valko, Michal}, 1577 1577 author = {Seznec, Julien and Menard, Pierre and Lazaric, Alessandro and Valko, Michal},
booktitle = {Proceedings of the Twenty Third International Conference on Artificial Intelligence and Statistics}, 1578 1578 booktitle = {Proceedings of the Twenty Third International Conference on Artificial Intelligence and Statistics},
pages = {3784--3794}, 1579 1579 pages = {3784--3794},
year = {2020}, 1580 1580 year = {2020},
editor = {Chiappa, Silvia and Calandra, Roberto}, 1581 1581 editor = {Chiappa, Silvia and Calandra, Roberto},
volume = {108}, 1582 1582 volume = {108},
series = {Proceedings of Machine Learning Research}, 1583 1583 series = {Proceedings of Machine Learning Research},
month = {26--28 Aug}, 1584 1584 month = {26--28 Aug},
publisher = {PMLR}, 1585 1585 publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v108/seznec20a/seznec20a.pdf}, 1586 1586 pdf = {http://proceedings.mlr.press/v108/seznec20a/seznec20a.pdf},
url = {https://proceedings.mlr.press/v108/seznec20a.html}, 1587 1587 url = {https://proceedings.mlr.press/v108/seznec20a.html},
abstract = {In many application domains (e.g., recommender systems, intelligent tutoring systems), the rewards associated to the available actions tend to decrease over time. This decay is either caused by the actions executed in the past (e.g., a user may get bored when songs of the same genre are recommended over and over) or by an external factor (e.g., content becomes outdated). These two situations can be modeled as specific instances of the rested and restless bandit settings, where arms are rotting (i.e., their value decrease over time). These problems were thought to be significantly different, since Levine et al. (2017) showed that state-of-the-art algorithms for restless bandit perform poorly in the rested rotting setting. In this paper, we introduce a novel algorithm, Rotting Adaptive Window UCB (RAW-UCB), that achieves near-optimal regret in both rotting rested and restless bandit, without any prior knowledge of the setting (rested or restless) and the type of non-stationarity (e.g., piece-wise constant, bounded variation). This is in striking contrast with previous negative results showing that no algorithm can achieve similar results as soon as rewards are allowed to increase. We confirm our theoretical findings on a number of synthetic and dataset-based experiments.} 1588 1588 abstract = {In many application domains (e.g., recommender systems, intelligent tutoring systems), the rewards associated to the available actions tend to decrease over time. This decay is either caused by the actions executed in the past (e.g., a user may get bored when songs of the same genre are recommended over and over) or by an external factor (e.g., content becomes outdated). These two situations can be modeled as specific instances of the rested and restless bandit settings, where arms are rotting (i.e., their value decrease over time). These problems were thought to be significantly different, since Levine et al. 
(2017) showed that state-of-the-art algorithms for restless bandit perform poorly in the rested rotting setting. In this paper, we introduce a novel algorithm, Rotting Adaptive Window UCB (RAW-UCB), that achieves near-optimal regret in both rotting rested and restless bandit, without any prior knowledge of the setting (rested or restless) and the type of non-stationarity (e.g., piece-wise constant, bounded variation). This is in striking contrast with previous negative results showing that no algorithm can achieve similar results as soon as rewards are allowed to increase. We confirm our theoretical findings on a number of synthetic and dataset-based experiments.}
} 1589 1589 }
1590 1590
@article{doi:10.3233/AIC-1994-7104, 1591 1591 @article{doi:10.3233/AIC-1994-7104,
author = {Agnar Aamodt and Enric Plaza}, 1592 1592 author = {Agnar Aamodt and Enric Plaza},
title ={Case-Based Reasoning: Foundational Issues, Methodological Variations, and System Approaches}, 1593 1593 title ={Case-Based Reasoning: Foundational Issues, Methodological Variations, and System Approaches},
journal = {AI Communications}, 1594 1594 journal = {AI Communications},
volume = {7}, 1595 1595 volume = {7},
number = {1}, 1596 1596 number = {1},
pages = {39-59}, 1597 1597 pages = {39-59},
year = {1994}, 1598 1598 year = {1994},
doi = {10.3233/AIC-1994-7104}, 1599 1599 doi = {10.3233/AIC-1994-7104},
URL = { 1600 1600 URL = {
https://journals.sagepub.com/doi/abs/10.3233/AIC-1994-7104 1601 1601 https://journals.sagepub.com/doi/abs/10.3233/AIC-1994-7104
}, 1602 1602 },
eprint = { 1603 1603 eprint = {
This is BibTeX, Version 0.99d (TeX Live 2023) 1 1 This is BibTeX, Version 0.99d (TeX Live 2023)
Capacity: max_strings=200000, hash_size=200000, hash_prime=170003 2 2 Capacity: max_strings=200000, hash_size=200000, hash_prime=170003
The top-level auxiliary file: main.aux 3 3 The top-level auxiliary file: main.aux
A level-1 auxiliary file: ./chapters/contexte2.aux 4 4 A level-1 auxiliary file: ./chapters/contexte2.aux
A level-1 auxiliary file: ./chapters/EIAH.aux 5 5 A level-1 auxiliary file: ./chapters/EIAH.aux
A level-1 auxiliary file: ./chapters/CBR.aux 6 6 A level-1 auxiliary file: ./chapters/CBR.aux
A level-1 auxiliary file: ./chapters/Architecture.aux 7 7 A level-1 auxiliary file: ./chapters/Architecture.aux
A level-1 auxiliary file: ./chapters/TS.aux 8 8 A level-1 auxiliary file: ./chapters/TS.aux
The style file: apalike.bst 9 9 The style file: apalike.bst
Database file #1: main.bib 10 10 Database file #1: main.bib
Warning--entry type for "Daubias2011" isn't style-file defined 11 11 Warning--entry type for "Daubias2011" isn't style-file defined
--line 693 of file main.bib 12 12 --line 693 of file main.bib
Warning--can't use both volume and number fields in wolf2024keep 13 13 Warning--can't use both volume and number fields in wolf2024keep
You've used 54 entries, 14 14 You've used 56 entries,
1935 wiz_defined-function locations, 15 15 1935 wiz_defined-function locations,
820 strings with 14869 characters, 16 16 831 strings with 15189 characters,
and the built_in function-call counts, 24305 in all, are: 17 17 and the built_in function-call counts, 25006 in all, are:
= -- 2341 18 18 = -- 2409
> -- 1102 19 19 > -- 1128
< -- 40 20 20 < -- 42
+ -- 390 21 21 + -- 400
- -- 365 22 22 - -- 373
* -- 2052 23 23 * -- 2111
:= -- 4140 24 24 := -- 4265
add.period$ -- 174 25 25 add.period$ -- 180
call.type$ -- 54 26 26 call.type$ -- 56
change.case$ -- 435 27 27 change.case$ -- 449
chr.to.int$ -- 53 28 28 chr.to.int$ -- 55
cite$ -- 55 29 29 cite$ -- 57
duplicate$ -- 937 30 30 duplicate$ -- 964
empty$ -- 1680 31 31 empty$ -- 1729
format.name$ -- 453 32 32 format.name$ -- 467
if$ -- 4883 33 33 if$ -- 5018
int.to.chr$ -- 2 34 34 int.to.chr$ -- 2
int.to.str$ -- 0 35 35 int.to.str$ -- 0
missing$ -- 59 36 36 missing$ -- 61
newline$ -- 273 37 37 newline$ -- 283
num.names$ -- 184 38 38 num.names$ -- 190
pop$ -- 409 39 39 pop$ -- 417
preamble$ -- 1 40 40 preamble$ -- 1
purify$ -- 440 41 41 purify$ -- 454
quote$ -- 0 42 42 quote$ -- 0
skip$ -- 709 43 43 skip$ -- 732
stack$ -- 0 44 44 stack$ -- 0
substring$ -- 1654 45 45 substring$ -- 1698
swap$ -- 180 46 46 swap$ -- 182
text.length$ -- 17 47 47 text.length$ -- 17
text.prefix$ -- 0 48 48 text.prefix$ -- 0
top$ -- 0 49 49 top$ -- 0
type$ -- 312 50 50 type$ -- 324
warning$ -- 1 51 51 warning$ -- 1
while$ -- 188 52 52 while$ -- 193
width$ -- 0 53 53 width$ -- 0
write$ -- 722 54 54 write$ -- 748
(There were 2 warnings) 55 55 (There were 2 warnings)
56 56
This is pdfTeX, Version 3.141592653-2.6-1.40.25 (TeX Live 2023) (preloaded format=pdflatex 2023.5.31) 17 APR 2025 16:01 1 1 This is pdfTeX, Version 3.141592653-2.6-1.40.25 (TeX Live 2023) (preloaded format=pdflatex 2023.5.31) 18 APR 2025 09:15
entering extended mode 2 2 entering extended mode
restricted \write18 enabled. 3 3 restricted \write18 enabled.
%&-line parsing enabled. 4 4 %&-line parsing enabled.
**main.tex 5 5 **main.tex
(./main.tex 6 6 (./main.tex
LaTeX2e <2022-11-01> patch level 1 7 7 LaTeX2e <2022-11-01> patch level 1
L3 programming layer <2023-05-22> (./spimufcphdthesis.cls 8 8 L3 programming layer <2023-05-22> (./spimufcphdthesis.cls
Document Class: spimufcphdthesis 2022/02/10 9 9 Document Class: spimufcphdthesis 2022/02/10
10 10
(/usr/local/texlive/2023/texmf-dist/tex/latex/upmethodology/upmethodology-docum 11 11 (/usr/local/texlive/2023/texmf-dist/tex/latex/upmethodology/upmethodology-docum
ent.cls 12 12 ent.cls
Document Class: upmethodology-document 2022/10/04 13 13 Document Class: upmethodology-document 2022/10/04
(./upmethodology-p-common.sty 14 14 (./upmethodology-p-common.sty
Package: upmethodology-p-common 2015/04/24 15 15 Package: upmethodology-p-common 2015/04/24
16 16
(/usr/local/texlive/2023/texmf-dist/tex/latex/base/ifthen.sty 17 17 (/usr/local/texlive/2023/texmf-dist/tex/latex/base/ifthen.sty
Package: ifthen 2022/04/13 v1.1d Standard LaTeX ifthen package (DPC) 18 18 Package: ifthen 2022/04/13 v1.1d Standard LaTeX ifthen package (DPC)
) 19 19 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/tools/xspace.sty 20 20 (/usr/local/texlive/2023/texmf-dist/tex/latex/tools/xspace.sty
Package: xspace 2014/10/28 v1.13 Space after command names (DPC,MH) 21 21 Package: xspace 2014/10/28 v1.13 Space after command names (DPC,MH)
) 22 22 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/xcolor/xcolor.sty 23 23 (/usr/local/texlive/2023/texmf-dist/tex/latex/xcolor/xcolor.sty
Package: xcolor 2022/06/12 v2.14 LaTeX color extensions (UK) 24 24 Package: xcolor 2022/06/12 v2.14 LaTeX color extensions (UK)
25 25
(/usr/local/texlive/2023/texmf-dist/tex/latex/graphics-cfg/color.cfg 26 26 (/usr/local/texlive/2023/texmf-dist/tex/latex/graphics-cfg/color.cfg
File: color.cfg 2016/01/02 v1.6 sample color configuration 27 27 File: color.cfg 2016/01/02 v1.6 sample color configuration
) 28 28 )
Package xcolor Info: Driver file: pdftex.def on input line 227. 29 29 Package xcolor Info: Driver file: pdftex.def on input line 227.
30 30
(/usr/local/texlive/2023/texmf-dist/tex/latex/graphics-def/pdftex.def 31 31 (/usr/local/texlive/2023/texmf-dist/tex/latex/graphics-def/pdftex.def
File: pdftex.def 2022/09/22 v1.2b Graphics/color driver for pdftex 32 32 File: pdftex.def 2022/09/22 v1.2b Graphics/color driver for pdftex
) 33 33 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/graphics/mathcolor.ltx) 34 34 (/usr/local/texlive/2023/texmf-dist/tex/latex/graphics/mathcolor.ltx)
Package xcolor Info: Model `cmy' substituted by `cmy0' on input line 1353. 35 35 Package xcolor Info: Model `cmy' substituted by `cmy0' on input line 1353.
Package xcolor Info: Model `hsb' substituted by `rgb' on input line 1357. 36 36 Package xcolor Info: Model `hsb' substituted by `rgb' on input line 1357.
Package xcolor Info: Model `RGB' extended on input line 1369. 37 37 Package xcolor Info: Model `RGB' extended on input line 1369.
Package xcolor Info: Model `HTML' substituted by `rgb' on input line 1371. 38 38 Package xcolor Info: Model `HTML' substituted by `rgb' on input line 1371.
Package xcolor Info: Model `Hsb' substituted by `hsb' on input line 1372. 39 39 Package xcolor Info: Model `Hsb' substituted by `hsb' on input line 1372.
Package xcolor Info: Model `tHsb' substituted by `hsb' on input line 1373. 40 40 Package xcolor Info: Model `tHsb' substituted by `hsb' on input line 1373.
Package xcolor Info: Model `HSB' substituted by `hsb' on input line 1374. 41 41 Package xcolor Info: Model `HSB' substituted by `hsb' on input line 1374.
Package xcolor Info: Model `Gray' substituted by `gray' on input line 1375. 42 42 Package xcolor Info: Model `Gray' substituted by `gray' on input line 1375.
Package xcolor Info: Model `wave' substituted by `hsb' on input line 1376. 43 43 Package xcolor Info: Model `wave' substituted by `hsb' on input line 1376.
) 44 44 )
(/usr/local/texlive/2023/texmf-dist/tex/generic/iftex/ifpdf.sty 45 45 (/usr/local/texlive/2023/texmf-dist/tex/generic/iftex/ifpdf.sty
Package: ifpdf 2019/10/25 v3.4 ifpdf legacy package. Use iftex instead. 46 46 Package: ifpdf 2019/10/25 v3.4 ifpdf legacy package. Use iftex instead.
47 47
(/usr/local/texlive/2023/texmf-dist/tex/generic/iftex/iftex.sty 48 48 (/usr/local/texlive/2023/texmf-dist/tex/generic/iftex/iftex.sty
Package: iftex 2022/02/03 v1.0f TeX engine tests 49 49 Package: iftex 2022/02/03 v1.0f TeX engine tests
)) 50 50 ))
(/usr/local/texlive/2023/texmf-dist/tex/latex/upmethodology/UPMVERSION.def)) 51 51 (/usr/local/texlive/2023/texmf-dist/tex/latex/upmethodology/UPMVERSION.def))
*********** UPMETHODOLOGY BOOK CLASS (WITH PART AND CHAPTER) 52 52 *********** UPMETHODOLOGY BOOK CLASS (WITH PART AND CHAPTER)
(/usr/local/texlive/2023/texmf-dist/tex/latex/base/book.cls 53 53 (/usr/local/texlive/2023/texmf-dist/tex/latex/base/book.cls
Document Class: book 2022/07/02 v1.4n Standard LaTeX document class 54 54 Document Class: book 2022/07/02 v1.4n Standard LaTeX document class
(/usr/local/texlive/2023/texmf-dist/tex/latex/base/bk11.clo 55 55 (/usr/local/texlive/2023/texmf-dist/tex/latex/base/bk11.clo
File: bk11.clo 2022/07/02 v1.4n Standard LaTeX file (size option) 56 56 File: bk11.clo 2022/07/02 v1.4n Standard LaTeX file (size option)
) 57 57 )
\c@part=\count185 58 58 \c@part=\count185
\c@chapter=\count186 59 59 \c@chapter=\count186
\c@section=\count187 60 60 \c@section=\count187
\c@subsection=\count188 61 61 \c@subsection=\count188
\c@subsubsection=\count189 62 62 \c@subsubsection=\count189
\c@paragraph=\count190 63 63 \c@paragraph=\count190
\c@subparagraph=\count191 64 64 \c@subparagraph=\count191
\c@figure=\count192 65 65 \c@figure=\count192
\c@table=\count193 66 66 \c@table=\count193
\abovecaptionskip=\skip48 67 67 \abovecaptionskip=\skip48
\belowcaptionskip=\skip49 68 68 \belowcaptionskip=\skip49
\bibindent=\dimen140 69 69 \bibindent=\dimen140
) 70 70 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/a4wide/a4wide.sty 71 71 (/usr/local/texlive/2023/texmf-dist/tex/latex/a4wide/a4wide.sty
Package: a4wide 1994/08/30 72 72 Package: a4wide 1994/08/30
73 73
(/usr/local/texlive/2023/texmf-dist/tex/latex/ntgclass/a4.sty 74 74 (/usr/local/texlive/2023/texmf-dist/tex/latex/ntgclass/a4.sty
Package: a4 2023/01/10 v1.2g A4 based page layout 75 75 Package: a4 2023/01/10 v1.2g A4 based page layout
)) 76 76 ))
(./upmethodology-document.sty 77 77 (./upmethodology-document.sty
Package: upmethodology-document 2015/04/24 78 78 Package: upmethodology-document 2015/04/24
79 79
**** upmethodology-document is using French language **** 80 80 **** upmethodology-document is using French language ****
(/usr/local/texlive/2023/texmf-dist/tex/generic/babel/babel.sty 81 81 (/usr/local/texlive/2023/texmf-dist/tex/generic/babel/babel.sty
Package: babel 2023/05/11 v3.89 The Babel package 82 82 Package: babel 2023/05/11 v3.89 The Babel package
\babel@savecnt=\count194 83 83 \babel@savecnt=\count194
\U@D=\dimen141 84 84 \U@D=\dimen141
\l@unhyphenated=\language87 85 85 \l@unhyphenated=\language87
86 86
(/usr/local/texlive/2023/texmf-dist/tex/generic/babel/txtbabel.def) 87 87 (/usr/local/texlive/2023/texmf-dist/tex/generic/babel/txtbabel.def)
\bbl@readstream=\read2 88 88 \bbl@readstream=\read2
\bbl@dirlevel=\count195 89 89 \bbl@dirlevel=\count195
90 90
(/usr/local/texlive/2023/texmf-dist/tex/generic/babel-french/french.ldf 91 91 (/usr/local/texlive/2023/texmf-dist/tex/generic/babel-french/french.ldf
Language: french 2023/03/08 v3.5q French support from the babel system 92 92 Language: french 2023/03/08 v3.5q French support from the babel system
Package babel Info: Hyphen rules for 'acadian' set to \l@french 93 93 Package babel Info: Hyphen rules for 'acadian' set to \l@french
(babel) (\language29). Reported on input line 91. 94 94 (babel) (\language29). Reported on input line 91.
Package babel Info: Hyphen rules for 'canadien' set to \l@french 95 95 Package babel Info: Hyphen rules for 'canadien' set to \l@french
(babel) (\language29). Reported on input line 92. 96 96 (babel) (\language29). Reported on input line 92.
\FB@nonchar=\count196 97 97 \FB@nonchar=\count196
Package babel Info: Making : an active character on input line 395. 98 98 Package babel Info: Making : an active character on input line 395.
Package babel Info: Making ; an active character on input line 396. 99 99 Package babel Info: Making ; an active character on input line 396.
Package babel Info: Making ! an active character on input line 397. 100 100 Package babel Info: Making ! an active character on input line 397.
Package babel Info: Making ? an active character on input line 398. 101 101 Package babel Info: Making ? an active character on input line 398.
\FBguill@level=\count197 102 102 \FBguill@level=\count197
\FBold@everypar=\toks16 103 103 \FBold@everypar=\toks16
\FB@Mht=\dimen142 104 104 \FB@Mht=\dimen142
\mc@charclass=\count198 105 105 \mc@charclass=\count198
\mc@charfam=\count199 106 106 \mc@charfam=\count199
\mc@charslot=\count266 107 107 \mc@charslot=\count266
\std@mcc=\count267 108 108 \std@mcc=\count267
\dec@mcc=\count268 109 109 \dec@mcc=\count268
\FB@parskip=\dimen143 110 110 \FB@parskip=\dimen143
\listindentFB=\dimen144 111 111 \listindentFB=\dimen144
\descindentFB=\dimen145 112 112 \descindentFB=\dimen145
\labelindentFB=\dimen146 113 113 \labelindentFB=\dimen146
\labelwidthFB=\dimen147 114 114 \labelwidthFB=\dimen147
\leftmarginFB=\dimen148 115 115 \leftmarginFB=\dimen148
\parindentFFN=\dimen149 116 116 \parindentFFN=\dimen149
\FBfnindent=\dimen150 117 117 \FBfnindent=\dimen150
) 118 118 )
(/usr/local/texlive/2023/texmf-dist/tex/generic/babel-french/frenchb.ldf 119 119 (/usr/local/texlive/2023/texmf-dist/tex/generic/babel-french/frenchb.ldf
Language: frenchb 2023/03/08 v3.5q French support from the babel system 120 120 Language: frenchb 2023/03/08 v3.5q French support from the babel system
121 121
122 122
Package babel-french Warning: Option `frenchb' for Babel is *deprecated*, 123 123 Package babel-french Warning: Option `frenchb' for Babel is *deprecated*,
(babel-french) it might be removed sooner or later. Please 124 124 (babel-french) it might be removed sooner or later. Please
(babel-french) use `french' instead; reported on input line 35. 125 125 (babel-french) use `french' instead; reported on input line 35.
126 126
(/usr/local/texlive/2023/texmf-dist/tex/generic/babel-french/french.ldf 127 127 (/usr/local/texlive/2023/texmf-dist/tex/generic/babel-french/french.ldf
Language: french 2023/03/08 v3.5q French support from the babel system 128 128 Language: french 2023/03/08 v3.5q French support from the babel system
))) 129 129 )))
(/usr/local/texlive/2023/texmf-dist/tex/generic/babel/locale/fr/babel-french.te 130 130 (/usr/local/texlive/2023/texmf-dist/tex/generic/babel/locale/fr/babel-french.te
x 131 131 x
Package babel Info: Importing font and identification data for french 132 132 Package babel Info: Importing font and identification data for french
(babel) from babel-fr.ini. Reported on input line 11. 133 133 (babel) from babel-fr.ini. Reported on input line 11.
) (/usr/local/texlive/2023/texmf-dist/tex/latex/carlisle/scalefnt.sty) 134 134 ) (/usr/local/texlive/2023/texmf-dist/tex/latex/carlisle/scalefnt.sty)
(/usr/local/texlive/2023/texmf-dist/tex/latex/graphics/keyval.sty 135 135 (/usr/local/texlive/2023/texmf-dist/tex/latex/graphics/keyval.sty
Package: keyval 2022/05/29 v1.15 key=value parser (DPC) 136 136 Package: keyval 2022/05/29 v1.15 key=value parser (DPC)
\KV@toks@=\toks17 137 137 \KV@toks@=\toks17
) 138 138 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/vmargin/vmargin.sty 139 139 (/usr/local/texlive/2023/texmf-dist/tex/latex/vmargin/vmargin.sty
Package: vmargin 2004/07/15 V2.5 set document margins (VK) 140 140 Package: vmargin 2004/07/15 V2.5 set document margins (VK)
141 141
Package: vmargin 2004/07/15 V2.5 set document margins (VK) 142 142 Package: vmargin 2004/07/15 V2.5 set document margins (VK)
\PaperWidth=\dimen151 143 143 \PaperWidth=\dimen151
\PaperHeight=\dimen152 144 144 \PaperHeight=\dimen152
) (./upmethodology-extension.sty 145 145 ) (./upmethodology-extension.sty
Package: upmethodology-extension 2012/09/21 146 146 Package: upmethodology-extension 2012/09/21
\upmext@tmp@putx=\skip50 147 147 \upmext@tmp@putx=\skip50
148 148
*** define extension value frontillustrationsize **** 149 149 *** define extension value frontillustrationsize ****
*** define extension value watermarksize **** 150 150 *** define extension value watermarksize ****
*** undefine extension value publisher **** 151 151 *** undefine extension value publisher ****
*** undefine extension value copyrighter **** 152 152 *** undefine extension value copyrighter ****
*** undefine extension value printedin ****) 153 153 *** undefine extension value printedin ****)
(/usr/local/texlive/2023/texmf-dist/tex/latex/upmethodology/upmethodology-fmt.s 154 154 (/usr/local/texlive/2023/texmf-dist/tex/latex/upmethodology/upmethodology-fmt.s
ty 155 155 ty
Package: upmethodology-fmt 2022/10/04 156 156 Package: upmethodology-fmt 2022/10/04
**** upmethodology-fmt is using French language **** 157 157 **** upmethodology-fmt is using French language ****
(/usr/local/texlive/2023/texmf-dist/tex/latex/graphics/graphicx.sty 158 158 (/usr/local/texlive/2023/texmf-dist/tex/latex/graphics/graphicx.sty
Package: graphicx 2021/09/16 v1.2d Enhanced LaTeX Graphics (DPC,SPQR) 159 159 Package: graphicx 2021/09/16 v1.2d Enhanced LaTeX Graphics (DPC,SPQR)
160 160
(/usr/local/texlive/2023/texmf-dist/tex/latex/graphics/graphics.sty 161 161 (/usr/local/texlive/2023/texmf-dist/tex/latex/graphics/graphics.sty
Package: graphics 2022/03/10 v1.4e Standard LaTeX Graphics (DPC,SPQR) 162 162 Package: graphics 2022/03/10 v1.4e Standard LaTeX Graphics (DPC,SPQR)
163 163
(/usr/local/texlive/2023/texmf-dist/tex/latex/graphics/trig.sty 164 164 (/usr/local/texlive/2023/texmf-dist/tex/latex/graphics/trig.sty
Package: trig 2021/08/11 v1.11 sin cos tan (DPC) 165 165 Package: trig 2021/08/11 v1.11 sin cos tan (DPC)
) 166 166 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/graphics-cfg/graphics.cfg 167 167 (/usr/local/texlive/2023/texmf-dist/tex/latex/graphics-cfg/graphics.cfg
File: graphics.cfg 2016/06/04 v1.11 sample graphics configuration 168 168 File: graphics.cfg 2016/06/04 v1.11 sample graphics configuration
) 169 169 )
Package graphics Info: Driver file: pdftex.def on input line 107. 170 170 Package graphics Info: Driver file: pdftex.def on input line 107.
) 171 171 )
\Gin@req@height=\dimen153 172 172 \Gin@req@height=\dimen153
\Gin@req@width=\dimen154 173 173 \Gin@req@width=\dimen154
) 174 174 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/caption/subcaption.sty 175 175 (/usr/local/texlive/2023/texmf-dist/tex/latex/caption/subcaption.sty
Package: subcaption 2023/02/19 v1.6 Sub-captions (AR) 176 176 Package: subcaption 2023/02/19 v1.6 Sub-captions (AR)
177 177
(/usr/local/texlive/2023/texmf-dist/tex/latex/caption/caption.sty 178 178 (/usr/local/texlive/2023/texmf-dist/tex/latex/caption/caption.sty
Package: caption 2023/03/12 v3.6j Customizing captions (AR) 179 179 Package: caption 2023/03/12 v3.6j Customizing captions (AR)
180 180
(/usr/local/texlive/2023/texmf-dist/tex/latex/caption/caption3.sty 181 181 (/usr/local/texlive/2023/texmf-dist/tex/latex/caption/caption3.sty
Package: caption3 2023/03/12 v2.4 caption3 kernel (AR) 182 182 Package: caption3 2023/03/12 v2.4 caption3 kernel (AR)
\caption@tempdima=\dimen155 183 183 \caption@tempdima=\dimen155
\captionmargin=\dimen156 184 184 \captionmargin=\dimen156
\caption@leftmargin=\dimen157 185 185 \caption@leftmargin=\dimen157
\caption@rightmargin=\dimen158 186 186 \caption@rightmargin=\dimen158
\caption@width=\dimen159 187 187 \caption@width=\dimen159
\caption@indent=\dimen160 188 188 \caption@indent=\dimen160
\caption@parindent=\dimen161 189 189 \caption@parindent=\dimen161
\caption@hangindent=\dimen162 190 190 \caption@hangindent=\dimen162
Package caption Info: Standard document class detected. 191 191 Package caption Info: Standard document class detected.
Package caption Info: french babel package is loaded. 192 192 Package caption Info: french babel package is loaded.
) 193 193 )
\c@caption@flags=\count269 194 194 \c@caption@flags=\count269
\c@continuedfloat=\count270 195 195 \c@continuedfloat=\count270
) 196 196 )
Package caption Info: New subtype `subfigure' on input line 239. 197 197 Package caption Info: New subtype `subfigure' on input line 239.
\c@subfigure=\count271 198 198 \c@subfigure=\count271
Package caption Info: New subtype `subtable' on input line 239. 199 199 Package caption Info: New subtype `subtable' on input line 239.
\c@subtable=\count272 200 200 \c@subtable=\count272
) 201 201 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/tools/tabularx.sty 202 202 (/usr/local/texlive/2023/texmf-dist/tex/latex/tools/tabularx.sty
Package: tabularx 2020/01/15 v2.11c `tabularx' package (DPC) 203 203 Package: tabularx 2020/01/15 v2.11c `tabularx' package (DPC)
204 204
(/usr/local/texlive/2023/texmf-dist/tex/latex/tools/array.sty 205 205 (/usr/local/texlive/2023/texmf-dist/tex/latex/tools/array.sty
Package: array 2022/09/04 v2.5g Tabular extension package (FMi) 206 206 Package: array 2022/09/04 v2.5g Tabular extension package (FMi)
\col@sep=\dimen163 207 207 \col@sep=\dimen163
\ar@mcellbox=\box51 208 208 \ar@mcellbox=\box51
\extrarowheight=\dimen164 209 209 \extrarowheight=\dimen164
\NC@list=\toks18 210 210 \NC@list=\toks18
\extratabsurround=\skip51 211 211 \extratabsurround=\skip51
\backup@length=\skip52 212 212 \backup@length=\skip52
\ar@cellbox=\box52 213 213 \ar@cellbox=\box52
) 214 214 )
\TX@col@width=\dimen165 215 215 \TX@col@width=\dimen165
\TX@old@table=\dimen166 216 216 \TX@old@table=\dimen166
\TX@old@col=\dimen167 217 217 \TX@old@col=\dimen167
\TX@target=\dimen168 218 218 \TX@target=\dimen168
\TX@delta=\dimen169 219 219 \TX@delta=\dimen169
\TX@cols=\count273 220 220 \TX@cols=\count273
\TX@ftn=\toks19 221 221 \TX@ftn=\toks19
) 222 222 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/tools/multicol.sty 223 223 (/usr/local/texlive/2023/texmf-dist/tex/latex/tools/multicol.sty
Package: multicol 2021/11/30 v1.9d multicolumn formatting (FMi) 224 224 Package: multicol 2021/11/30 v1.9d multicolumn formatting (FMi)
\c@tracingmulticols=\count274 225 225 \c@tracingmulticols=\count274
\mult@box=\box53 226 226 \mult@box=\box53
\multicol@leftmargin=\dimen170 227 227 \multicol@leftmargin=\dimen170
\c@unbalance=\count275 228 228 \c@unbalance=\count275
\c@collectmore=\count276 229 229 \c@collectmore=\count276
\doublecol@number=\count277 230 230 \doublecol@number=\count277
\multicoltolerance=\count278 231 231 \multicoltolerance=\count278
\multicolpretolerance=\count279 232 232 \multicolpretolerance=\count279
\full@width=\dimen171 233 233 \full@width=\dimen171
\page@free=\dimen172 234 234 \page@free=\dimen172
\premulticols=\dimen173 235 235 \premulticols=\dimen173
\postmulticols=\dimen174 236 236 \postmulticols=\dimen174
\multicolsep=\skip53 237 237 \multicolsep=\skip53
\multicolbaselineskip=\skip54 238 238 \multicolbaselineskip=\skip54
\partial@page=\box54 239 239 \partial@page=\box54
\last@line=\box55 240 240 \last@line=\box55
\maxbalancingoverflow=\dimen175 241 241 \maxbalancingoverflow=\dimen175
\mult@rightbox=\box56 242 242 \mult@rightbox=\box56
\mult@grightbox=\box57 243 243 \mult@grightbox=\box57
\mult@firstbox=\box58 244 244 \mult@firstbox=\box58
\mult@gfirstbox=\box59 245 245 \mult@gfirstbox=\box59
\@tempa=\box60 246 246 \@tempa=\box60
\@tempa=\box61 247 247 \@tempa=\box61
\@tempa=\box62 248 248 \@tempa=\box62
\@tempa=\box63 249 249 \@tempa=\box63
\@tempa=\box64 250 250 \@tempa=\box64
\@tempa=\box65 251 251 \@tempa=\box65
\@tempa=\box66 252 252 \@tempa=\box66
\@tempa=\box67 253 253 \@tempa=\box67
\@tempa=\box68 254 254 \@tempa=\box68
\@tempa=\box69 255 255 \@tempa=\box69
\@tempa=\box70 256 256 \@tempa=\box70
\@tempa=\box71 257 257 \@tempa=\box71
\@tempa=\box72 258 258 \@tempa=\box72
\@tempa=\box73 259 259 \@tempa=\box73
\@tempa=\box74 260 260 \@tempa=\box74
\@tempa=\box75 261 261 \@tempa=\box75
\@tempa=\box76 262 262 \@tempa=\box76
\@tempa=\box77 263 263 \@tempa=\box77
\@tempa=\box78 264 264 \@tempa=\box78
\@tempa=\box79 265 265 \@tempa=\box79
\@tempa=\box80 266 266 \@tempa=\box80
\@tempa=\box81 267 267 \@tempa=\box81
\@tempa=\box82 268 268 \@tempa=\box82
\@tempa=\box83 269 269 \@tempa=\box83
\@tempa=\box84 270 270 \@tempa=\box84
\@tempa=\box85 271 271 \@tempa=\box85
\@tempa=\box86 272 272 \@tempa=\box86
\@tempa=\box87 273 273 \@tempa=\box87
\@tempa=\box88 274 274 \@tempa=\box88
\@tempa=\box89 275 275 \@tempa=\box89
\@tempa=\box90 276 276 \@tempa=\box90
\@tempa=\box91 277 277 \@tempa=\box91
\@tempa=\box92 278 278 \@tempa=\box92
\@tempa=\box93 279 279 \@tempa=\box93
\@tempa=\box94 280 280 \@tempa=\box94
\@tempa=\box95 281 281 \@tempa=\box95
\c@minrows=\count280 282 282 \c@minrows=\count280
\c@columnbadness=\count281 283 283 \c@columnbadness=\count281
\c@finalcolumnbadness=\count282 284 284 \c@finalcolumnbadness=\count282
\last@try=\dimen176 285 285 \last@try=\dimen176
\multicolovershoot=\dimen177 286 286 \multicolovershoot=\dimen177
\multicolundershoot=\dimen178 287 287 \multicolundershoot=\dimen178
\mult@nat@firstbox=\box96 288 288 \mult@nat@firstbox=\box96
\colbreak@box=\box97 289 289 \colbreak@box=\box97
\mc@col@check@num=\count283 290 290 \mc@col@check@num=\count283
) 291 291 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/colortbl/colortbl.sty 292 292 (/usr/local/texlive/2023/texmf-dist/tex/latex/colortbl/colortbl.sty
Package: colortbl 2022/06/20 v1.0f Color table columns (DPC) 293 293 Package: colortbl 2022/06/20 v1.0f Color table columns (DPC)
\everycr=\toks20 294 294 \everycr=\toks20
\minrowclearance=\skip55 295 295 \minrowclearance=\skip55
\rownum=\count284 296 296 \rownum=\count284
) 297 297 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/picinpar/picinpar.sty 298 298 (/usr/local/texlive/2023/texmf-dist/tex/latex/picinpar/picinpar.sty
Pictures in Paragraphs. Version 1.3, November 22, 2022 299 299 Pictures in Paragraphs. Version 1.3, November 22, 2022
\br=\count285 300 300 \br=\count285
\bl=\count286 301 301 \bl=\count286
\na=\count287 302 302 \na=\count287
\nb=\count288 303 303 \nb=\count288
\tcdsav=\count289 304 304 \tcdsav=\count289
\tcl=\count290 305 305 \tcl=\count290
\tcd=\count291 306 306 \tcd=\count291
\tcn=\count292 307 307 \tcn=\count292
\cumtcl=\count293 308 308 \cumtcl=\count293
\cumpartcl=\count294 309 309 \cumpartcl=\count294
\lftside=\dimen179 310 310 \lftside=\dimen179
\rtside=\dimen180 311 311 \rtside=\dimen180
\hpic=\dimen181 312 312 \hpic=\dimen181
\vpic=\dimen182 313 313 \vpic=\dimen182
\strutilg=\dimen183 314 314 \strutilg=\dimen183
\picwd=\dimen184 315 315 \picwd=\dimen184
\topheight=\dimen185 316 316 \topheight=\dimen185
\ilg=\dimen186 317 317 \ilg=\dimen186
\lpic=\dimen187 318 318 \lpic=\dimen187
\lwindowsep=\dimen188 319 319 \lwindowsep=\dimen188
\rwindowsep=\dimen189 320 320 \rwindowsep=\dimen189
\cumpar=\dimen190 321 321 \cumpar=\dimen190
\twa=\toks21 322 322 \twa=\toks21
\la=\toks22 323 323 \la=\toks22
\ra=\toks23 324 324 \ra=\toks23
\ha=\toks24 325 325 \ha=\toks24
\pictoc=\toks25 326 326 \pictoc=\toks25
\rawtext=\box98 327 327 \rawtext=\box98
\holder=\box99 328 328 \holder=\box99
\windowbox=\box100 329 329 \windowbox=\box100
\wartext=\box101 330 330 \wartext=\box101
\finaltext=\box102 331 331 \finaltext=\box102
\aslice=\box103 332 332 \aslice=\box103
\bslice=\box104 333 333 \bslice=\box104
\wbox=\box105 334 334 \wbox=\box105
\wstrutbox=\box106 335 335 \wstrutbox=\box106
\picbox=\box107 336 336 \picbox=\box107
\waslice=\box108 337 337 \waslice=\box108
\wbslice=\box109 338 338 \wbslice=\box109
\fslice=\box110 339 339 \fslice=\box110
) (/usr/local/texlive/2023/texmf-dist/tex/latex/amsmath/amsmath.sty 340 340 ) (/usr/local/texlive/2023/texmf-dist/tex/latex/amsmath/amsmath.sty
Package: amsmath 2022/04/08 v2.17n AMS math features 341 341 Package: amsmath 2022/04/08 v2.17n AMS math features
\@mathmargin=\skip56 342 342 \@mathmargin=\skip56
343 343
For additional information on amsmath, use the `?' option. 344 344 For additional information on amsmath, use the `?' option.
(/usr/local/texlive/2023/texmf-dist/tex/latex/amsmath/amstext.sty 345 345 (/usr/local/texlive/2023/texmf-dist/tex/latex/amsmath/amstext.sty
Package: amstext 2021/08/26 v2.01 AMS text 346 346 Package: amstext 2021/08/26 v2.01 AMS text
347 347
(/usr/local/texlive/2023/texmf-dist/tex/latex/amsmath/amsgen.sty 348 348 (/usr/local/texlive/2023/texmf-dist/tex/latex/amsmath/amsgen.sty
File: amsgen.sty 1999/11/30 v2.0 generic functions 349 349 File: amsgen.sty 1999/11/30 v2.0 generic functions
\@emptytoks=\toks26 350 350 \@emptytoks=\toks26
\ex@=\dimen191 351 351 \ex@=\dimen191
)) 352 352 ))
(/usr/local/texlive/2023/texmf-dist/tex/latex/amsmath/amsbsy.sty 353 353 (/usr/local/texlive/2023/texmf-dist/tex/latex/amsmath/amsbsy.sty
Package: amsbsy 1999/11/29 v1.2d Bold Symbols 354 354 Package: amsbsy 1999/11/29 v1.2d Bold Symbols
\pmbraise@=\dimen192 355 355 \pmbraise@=\dimen192
) 356 356 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/amsmath/amsopn.sty 357 357 (/usr/local/texlive/2023/texmf-dist/tex/latex/amsmath/amsopn.sty
Package: amsopn 2022/04/08 v2.04 operator names 358 358 Package: amsopn 2022/04/08 v2.04 operator names
) 359 359 )
\inf@bad=\count295 360 360 \inf@bad=\count295
LaTeX Info: Redefining \frac on input line 234. 361 361 LaTeX Info: Redefining \frac on input line 234.
\uproot@=\count296 362 362 \uproot@=\count296
\leftroot@=\count297 363 363 \leftroot@=\count297
LaTeX Info: Redefining \overline on input line 399. 364 364 LaTeX Info: Redefining \overline on input line 399.
LaTeX Info: Redefining \colon on input line 410. 365 365 LaTeX Info: Redefining \colon on input line 410.
\classnum@=\count298 366 366 \classnum@=\count298
\DOTSCASE@=\count299 367 367 \DOTSCASE@=\count299
LaTeX Info: Redefining \ldots on input line 496. 368 368 LaTeX Info: Redefining \ldots on input line 496.
LaTeX Info: Redefining \dots on input line 499. 369 369 LaTeX Info: Redefining \dots on input line 499.
LaTeX Info: Redefining \cdots on input line 620. 370 370 LaTeX Info: Redefining \cdots on input line 620.
\Mathstrutbox@=\box111 371 371 \Mathstrutbox@=\box111
\strutbox@=\box112 372 372 \strutbox@=\box112
LaTeX Info: Redefining \big on input line 722. 373 373 LaTeX Info: Redefining \big on input line 722.
LaTeX Info: Redefining \Big on input line 723. 374 374 LaTeX Info: Redefining \Big on input line 723.
LaTeX Info: Redefining \bigg on input line 724. 375 375 LaTeX Info: Redefining \bigg on input line 724.
LaTeX Info: Redefining \Bigg on input line 725. 376 376 LaTeX Info: Redefining \Bigg on input line 725.
\big@size=\dimen193 377 377 \big@size=\dimen193
LaTeX Font Info: Redeclaring font encoding OML on input line 743. 378 378 LaTeX Font Info: Redeclaring font encoding OML on input line 743.
LaTeX Font Info: Redeclaring font encoding OMS on input line 744. 379 379 LaTeX Font Info: Redeclaring font encoding OMS on input line 744.
\macc@depth=\count300 380 380 \macc@depth=\count300
LaTeX Info: Redefining \bmod on input line 905. 381 381 LaTeX Info: Redefining \bmod on input line 905.
LaTeX Info: Redefining \pmod on input line 910. 382 382 LaTeX Info: Redefining \pmod on input line 910.
LaTeX Info: Redefining \smash on input line 940. 383 383 LaTeX Info: Redefining \smash on input line 940.
LaTeX Info: Redefining \relbar on input line 970. 384 384 LaTeX Info: Redefining \relbar on input line 970.
LaTeX Info: Redefining \Relbar on input line 971. 385 385 LaTeX Info: Redefining \Relbar on input line 971.
\c@MaxMatrixCols=\count301 386 386 \c@MaxMatrixCols=\count301
\dotsspace@=\muskip16 387 387 \dotsspace@=\muskip16
\c@parentequation=\count302 388 388 \c@parentequation=\count302
\dspbrk@lvl=\count303 389 389 \dspbrk@lvl=\count303
\tag@help=\toks27 390 390 \tag@help=\toks27
\row@=\count304 391 391 \row@=\count304
\column@=\count305 392 392 \column@=\count305
\maxfields@=\count306 393 393 \maxfields@=\count306
\andhelp@=\toks28 394 394 \andhelp@=\toks28
\eqnshift@=\dimen194 395 395 \eqnshift@=\dimen194
\alignsep@=\dimen195 396 396 \alignsep@=\dimen195
\tagshift@=\dimen196 397 397 \tagshift@=\dimen196
\tagwidth@=\dimen197 398 398 \tagwidth@=\dimen197
\totwidth@=\dimen198 399 399 \totwidth@=\dimen198
\lineht@=\dimen199 400 400 \lineht@=\dimen199
\@envbody=\toks29 401 401 \@envbody=\toks29
\multlinegap=\skip57 402 402 \multlinegap=\skip57
\multlinetaggap=\skip58 403 403 \multlinetaggap=\skip58
\mathdisplay@stack=\toks30 404 404 \mathdisplay@stack=\toks30
LaTeX Info: Redefining \[ on input line 2953. 405 405 LaTeX Info: Redefining \[ on input line 2953.
LaTeX Info: Redefining \] on input line 2954. 406 406 LaTeX Info: Redefining \] on input line 2954.
) 407 407 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/amscls/amsthm.sty 408 408 (/usr/local/texlive/2023/texmf-dist/tex/latex/amscls/amsthm.sty
Package: amsthm 2020/05/29 v2.20.6 409 409 Package: amsthm 2020/05/29 v2.20.6
\thm@style=\toks31 410 410 \thm@style=\toks31
\thm@bodyfont=\toks32 411 411 \thm@bodyfont=\toks32
\thm@headfont=\toks33 412 412 \thm@headfont=\toks33
\thm@notefont=\toks34 413 413 \thm@notefont=\toks34
\thm@headpunct=\toks35 414 414 \thm@headpunct=\toks35
\thm@preskip=\skip59 415 415 \thm@preskip=\skip59
\thm@postskip=\skip60 416 416 \thm@postskip=\skip60
\thm@headsep=\skip61 417 417 \thm@headsep=\skip61
\dth@everypar=\toks36 418 418 \dth@everypar=\toks36
) 419 419 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/thmtools/thmtools.sty 420 420 (/usr/local/texlive/2023/texmf-dist/tex/latex/thmtools/thmtools.sty
Package: thmtools 2023/05/04 v0.76 421 421 Package: thmtools 2023/05/04 v0.76
\thmt@toks=\toks37 422 422 \thmt@toks=\toks37
\c@thmt@dummyctr=\count307 423 423 \c@thmt@dummyctr=\count307
424 424
(/usr/local/texlive/2023/texmf-dist/tex/latex/thmtools/thm-patch.sty 425 425 (/usr/local/texlive/2023/texmf-dist/tex/latex/thmtools/thm-patch.sty
Package: thm-patch 2023/05/04 v0.76 426 426 Package: thm-patch 2023/05/04 v0.76
427 427
(/usr/local/texlive/2023/texmf-dist/tex/latex/thmtools/parseargs.sty 428 428 (/usr/local/texlive/2023/texmf-dist/tex/latex/thmtools/parseargs.sty
Package: parseargs 2023/05/04 v0.76 429 429 Package: parseargs 2023/05/04 v0.76
\@parsespec=\toks38 430 430 \@parsespec=\toks38
)) 431 431 ))
(/usr/local/texlive/2023/texmf-dist/tex/latex/thmtools/thm-kv.sty 432 432 (/usr/local/texlive/2023/texmf-dist/tex/latex/thmtools/thm-kv.sty
Package: thm-kv 2023/05/04 v0.76 433 433 Package: thm-kv 2023/05/04 v0.76
Package thm-kv Info: Theorem names will be uppercased on input line 42. 434 434 Package thm-kv Info: Theorem names will be uppercased on input line 42.
435 435
(/usr/local/texlive/2023/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty 436 436 (/usr/local/texlive/2023/texmf-dist/tex/latex/kvsetkeys/kvsetkeys.sty
Package: kvsetkeys 2022-10-05 v1.19 Key value parser (HO) 437 437 Package: kvsetkeys 2022-10-05 v1.19 Key value parser (HO)
) 438 438 )
Package thm-kv Info: kvsetkeys patch (v1.16 or later) on input line 158. 439 439 Package thm-kv Info: kvsetkeys patch (v1.16 or later) on input line 158.
) 440 440 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/thmtools/thm-autoref.sty 441 441 (/usr/local/texlive/2023/texmf-dist/tex/latex/thmtools/thm-autoref.sty
Package: thm-autoref 2023/05/04 v0.76 442 442 Package: thm-autoref 2023/05/04 v0.76
443 443
(/usr/local/texlive/2023/texmf-dist/tex/latex/thmtools/aliasctr.sty 444 444 (/usr/local/texlive/2023/texmf-dist/tex/latex/thmtools/aliasctr.sty
Package: aliasctr 2023/05/04 v0.76 445 445 Package: aliasctr 2023/05/04 v0.76
)) 446 446 ))
(/usr/local/texlive/2023/texmf-dist/tex/latex/thmtools/thm-listof.sty 447 447 (/usr/local/texlive/2023/texmf-dist/tex/latex/thmtools/thm-listof.sty
Package: thm-listof 2023/05/04 v0.76 448 448 Package: thm-listof 2023/05/04 v0.76
) 449 449 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/thmtools/thm-restate.sty 450 450 (/usr/local/texlive/2023/texmf-dist/tex/latex/thmtools/thm-restate.sty
Package: thm-restate 2023/05/04 v0.76 451 451 Package: thm-restate 2023/05/04 v0.76
) 452 452 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/thmtools/thm-amsthm.sty 453 453 (/usr/local/texlive/2023/texmf-dist/tex/latex/thmtools/thm-amsthm.sty
Package: thm-amsthm 2023/05/04 v0.76 454 454 Package: thm-amsthm 2023/05/04 v0.76
\thmt@style@headstyle=\toks39 455 455 \thmt@style@headstyle=\toks39
)) 456 456 ))
(/usr/local/texlive/2023/texmf-dist/tex/latex/psnfss/pifont.sty 457 457 (/usr/local/texlive/2023/texmf-dist/tex/latex/psnfss/pifont.sty
Package: pifont 2020/03/25 PSNFSS-v9.3 Pi font support (SPQR) 458 458 Package: pifont 2020/03/25 PSNFSS-v9.3 Pi font support (SPQR)
LaTeX Font Info: Trying to load font information for U+pzd on input line 63. 459 459 LaTeX Font Info: Trying to load font information for U+pzd on input line 63.
460 460
461 461
(/usr/local/texlive/2023/texmf-dist/tex/latex/psnfss/upzd.fd 462 462 (/usr/local/texlive/2023/texmf-dist/tex/latex/psnfss/upzd.fd
File: upzd.fd 2001/06/04 font definitions for U/pzd. 463 463 File: upzd.fd 2001/06/04 font definitions for U/pzd.
) 464 464 )
LaTeX Font Info: Trying to load font information for U+psy on input line 64. 465 465 LaTeX Font Info: Trying to load font information for U+psy on input line 64.
466 466
467 467
(/usr/local/texlive/2023/texmf-dist/tex/latex/psnfss/upsy.fd 468 468 (/usr/local/texlive/2023/texmf-dist/tex/latex/psnfss/upsy.fd
File: upsy.fd 2001/06/04 font definitions for U/psy. 469 469 File: upsy.fd 2001/06/04 font definitions for U/psy.
)) 470 470 ))
(/usr/local/texlive/2023/texmf-dist/tex/latex/setspace/setspace.sty 471 471 (/usr/local/texlive/2023/texmf-dist/tex/latex/setspace/setspace.sty
Package: setspace 2022/12/04 v6.7b set line spacing 472 472 Package: setspace 2022/12/04 v6.7b set line spacing
) 473 473 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/tools/varioref.sty 474 474 (/usr/local/texlive/2023/texmf-dist/tex/latex/tools/varioref.sty
Package: varioref 2022/01/09 v1.6f package for extended references (FMi) 475 475 Package: varioref 2022/01/09 v1.6f package for extended references (FMi)
\c@vrcnt=\count308 476 476 \c@vrcnt=\count308
) 477 477 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/txfonts/txfonts.sty 478 478 (/usr/local/texlive/2023/texmf-dist/tex/latex/txfonts/txfonts.sty
Package: txfonts 2008/01/22 v3.2.1 479 479 Package: txfonts 2008/01/22 v3.2.1
LaTeX Font Info: Redeclaring symbol font `operators' on input line 21. 480 480 LaTeX Font Info: Redeclaring symbol font `operators' on input line 21.
LaTeX Font Info: Overwriting symbol font `operators' in version `normal' 481 481 LaTeX Font Info: Overwriting symbol font `operators' in version `normal'
(Font) OT1/cmr/m/n --> OT1/txr/m/n on input line 21. 482 482 (Font) OT1/cmr/m/n --> OT1/txr/m/n on input line 21.
LaTeX Font Info: Overwriting symbol font `operators' in version `bold' 483 483 LaTeX Font Info: Overwriting symbol font `operators' in version `bold'
(Font) OT1/cmr/bx/n --> OT1/txr/m/n on input line 21. 484 484 (Font) OT1/cmr/bx/n --> OT1/txr/m/n on input line 21.
LaTeX Font Info: Overwriting symbol font `operators' in version `bold' 485 485 LaTeX Font Info: Overwriting symbol font `operators' in version `bold'
(Font) OT1/txr/m/n --> OT1/txr/bx/n on input line 22. 486 486 (Font) OT1/txr/m/n --> OT1/txr/bx/n on input line 22.
\symitalic=\mathgroup4 487 487 \symitalic=\mathgroup4
LaTeX Font Info: Overwriting symbol font `italic' in version `bold' 488 488 LaTeX Font Info: Overwriting symbol font `italic' in version `bold'
(Font) OT1/txr/m/it --> OT1/txr/bx/it on input line 26. 489 489 (Font) OT1/txr/m/it --> OT1/txr/bx/it on input line 26.
LaTeX Font Info: Redeclaring math alphabet \mathbf on input line 29. 490 490 LaTeX Font Info: Redeclaring math alphabet \mathbf on input line 29.
LaTeX Font Info: Overwriting math alphabet `\mathbf' in version `normal' 491 491 LaTeX Font Info: Overwriting math alphabet `\mathbf' in version `normal'
(Font) OT1/cmr/bx/n --> OT1/txr/bx/n on input line 29. 492 492 (Font) OT1/cmr/bx/n --> OT1/txr/bx/n on input line 29.
LaTeX Font Info: Overwriting math alphabet `\mathbf' in version `bold' 493 493 LaTeX Font Info: Overwriting math alphabet `\mathbf' in version `bold'
(Font) OT1/cmr/bx/n --> OT1/txr/bx/n on input line 29. 494 494 (Font) OT1/cmr/bx/n --> OT1/txr/bx/n on input line 29.
LaTeX Font Info: Redeclaring math alphabet \mathit on input line 30. 495 495 LaTeX Font Info: Redeclaring math alphabet \mathit on input line 30.
LaTeX Font Info: Overwriting math alphabet `\mathit' in version `normal' 496 496 LaTeX Font Info: Overwriting math alphabet `\mathit' in version `normal'
(Font) OT1/cmr/m/it --> OT1/txr/m/it on input line 30. 497 497 (Font) OT1/cmr/m/it --> OT1/txr/m/it on input line 30.
LaTeX Font Info: Overwriting math alphabet `\mathit' in version `bold' 498 498 LaTeX Font Info: Overwriting math alphabet `\mathit' in version `bold'
(Font) OT1/cmr/bx/it --> OT1/txr/m/it on input line 30. 499 499 (Font) OT1/cmr/bx/it --> OT1/txr/m/it on input line 30.
LaTeX Font Info: Overwriting math alphabet `\mathit' in version `bold' 500 500 LaTeX Font Info: Overwriting math alphabet `\mathit' in version `bold'
(Font) OT1/txr/m/it --> OT1/txr/bx/it on input line 31. 501 501 (Font) OT1/txr/m/it --> OT1/txr/bx/it on input line 31.
LaTeX Font Info: Redeclaring math alphabet \mathsf on input line 40. 502 502 LaTeX Font Info: Redeclaring math alphabet \mathsf on input line 40.
LaTeX Font Info: Overwriting math alphabet `\mathsf' in version `normal' 503 503 LaTeX Font Info: Overwriting math alphabet `\mathsf' in version `normal'
(Font) OT1/cmss/m/n --> OT1/txss/m/n on input line 40. 504 504 (Font) OT1/cmss/m/n --> OT1/txss/m/n on input line 40.
LaTeX Font Info: Overwriting math alphabet `\mathsf' in version `bold' 505 505 LaTeX Font Info: Overwriting math alphabet `\mathsf' in version `bold'
(Font) OT1/cmss/bx/n --> OT1/txss/m/n on input line 40. 506 506 (Font) OT1/cmss/bx/n --> OT1/txss/m/n on input line 40.
LaTeX Font Info: Overwriting math alphabet `\mathsf' in version `bold' 507 507 LaTeX Font Info: Overwriting math alphabet `\mathsf' in version `bold'
(Font) OT1/txss/m/n --> OT1/txss/b/n on input line 41. 508 508 (Font) OT1/txss/m/n --> OT1/txss/b/n on input line 41.
LaTeX Font Info: Redeclaring math alphabet \mathtt on input line 50. 509 509 LaTeX Font Info: Redeclaring math alphabet \mathtt on input line 50.
LaTeX Font Info: Overwriting math alphabet `\mathtt' in version `normal' 510 510 LaTeX Font Info: Overwriting math alphabet `\mathtt' in version `normal'
(Font) OT1/cmtt/m/n --> OT1/txtt/m/n on input line 50. 511 511 (Font) OT1/cmtt/m/n --> OT1/txtt/m/n on input line 50.
LaTeX Font Info: Overwriting math alphabet `\mathtt' in version `bold' 512 512 LaTeX Font Info: Overwriting math alphabet `\mathtt' in version `bold'
(Font) OT1/cmtt/m/n --> OT1/txtt/m/n on input line 50. 513 513 (Font) OT1/cmtt/m/n --> OT1/txtt/m/n on input line 50.
LaTeX Font Info: Overwriting math alphabet `\mathtt' in version `bold' 514 514 LaTeX Font Info: Overwriting math alphabet `\mathtt' in version `bold'
(Font) OT1/txtt/m/n --> OT1/txtt/b/n on input line 51. 515 515 (Font) OT1/txtt/m/n --> OT1/txtt/b/n on input line 51.
LaTeX Font Info: Redeclaring symbol font `letters' on input line 58. 516 516 LaTeX Font Info: Redeclaring symbol font `letters' on input line 58.
LaTeX Font Info: Overwriting symbol font `letters' in version `normal' 517 517 LaTeX Font Info: Overwriting symbol font `letters' in version `normal'
(Font) OML/cmm/m/it --> OML/txmi/m/it on input line 58. 518 518 (Font) OML/cmm/m/it --> OML/txmi/m/it on input line 58.
LaTeX Font Info: Overwriting symbol font `letters' in version `bold' 519 519 LaTeX Font Info: Overwriting symbol font `letters' in version `bold'
(Font) OML/cmm/b/it --> OML/txmi/m/it on input line 58. 520 520 (Font) OML/cmm/b/it --> OML/txmi/m/it on input line 58.
LaTeX Font Info: Overwriting symbol font `letters' in version `bold' 521 521 LaTeX Font Info: Overwriting symbol font `letters' in version `bold'
(Font) OML/txmi/m/it --> OML/txmi/bx/it on input line 59. 522 522 (Font) OML/txmi/m/it --> OML/txmi/bx/it on input line 59.
\symlettersA=\mathgroup5 523 523 \symlettersA=\mathgroup5
LaTeX Font Info: Overwriting symbol font `lettersA' in version `bold' 524 524 LaTeX Font Info: Overwriting symbol font `lettersA' in version `bold'
(Font) U/txmia/m/it --> U/txmia/bx/it on input line 67. 525 525 (Font) U/txmia/m/it --> U/txmia/bx/it on input line 67.
LaTeX Font Info: Redeclaring symbol font `symbols' on input line 77. 526 526 LaTeX Font Info: Redeclaring symbol font `symbols' on input line 77.
LaTeX Font Info: Overwriting symbol font `symbols' in version `normal' 527 527 LaTeX Font Info: Overwriting symbol font `symbols' in version `normal'
(Font) OMS/cmsy/m/n --> OMS/txsy/m/n on input line 77. 528 528 (Font) OMS/cmsy/m/n --> OMS/txsy/m/n on input line 77.
LaTeX Font Info: Overwriting symbol font `symbols' in version `bold' 529 529 LaTeX Font Info: Overwriting symbol font `symbols' in version `bold'
(Font) OMS/cmsy/b/n --> OMS/txsy/m/n on input line 77. 530 530 (Font) OMS/cmsy/b/n --> OMS/txsy/m/n on input line 77.
LaTeX Font Info: Overwriting symbol font `symbols' in version `bold' 531 531 LaTeX Font Info: Overwriting symbol font `symbols' in version `bold'
(Font) OMS/txsy/m/n --> OMS/txsy/bx/n on input line 78. 532 532 (Font) OMS/txsy/m/n --> OMS/txsy/bx/n on input line 78.
\symAMSa=\mathgroup6 533 533 \symAMSa=\mathgroup6
LaTeX Font Info: Overwriting symbol font `AMSa' in version `bold' 534 534 LaTeX Font Info: Overwriting symbol font `AMSa' in version `bold'
(Font) U/txsya/m/n --> U/txsya/bx/n on input line 94. 535 535 (Font) U/txsya/m/n --> U/txsya/bx/n on input line 94.
\symAMSb=\mathgroup7 536 536 \symAMSb=\mathgroup7
LaTeX Font Info: Overwriting symbol font `AMSb' in version `bold' 537 537 LaTeX Font Info: Overwriting symbol font `AMSb' in version `bold'
(Font) U/txsyb/m/n --> U/txsyb/bx/n on input line 103. 538 538 (Font) U/txsyb/m/n --> U/txsyb/bx/n on input line 103.
\symsymbolsC=\mathgroup8 539 539 \symsymbolsC=\mathgroup8
LaTeX Font Info: Overwriting symbol font `symbolsC' in version `bold' 540 540 LaTeX Font Info: Overwriting symbol font `symbolsC' in version `bold'
(Font) U/txsyc/m/n --> U/txsyc/bx/n on input line 113. 541 541 (Font) U/txsyc/m/n --> U/txsyc/bx/n on input line 113.
LaTeX Font Info: Redeclaring symbol font `largesymbols' on input line 120. 542 542 LaTeX Font Info: Redeclaring symbol font `largesymbols' on input line 120.
LaTeX Font Info: Overwriting symbol font `largesymbols' in version `normal' 543 543 LaTeX Font Info: Overwriting symbol font `largesymbols' in version `normal'
(Font) OMX/cmex/m/n --> OMX/txex/m/n on input line 120. 544 544 (Font) OMX/cmex/m/n --> OMX/txex/m/n on input line 120.
LaTeX Font Info: Overwriting symbol font `largesymbols' in version `bold' 545 545 LaTeX Font Info: Overwriting symbol font `largesymbols' in version `bold'
(Font) OMX/cmex/m/n --> OMX/txex/m/n on input line 120. 546 546 (Font) OMX/cmex/m/n --> OMX/txex/m/n on input line 120.
LaTeX Font Info: Overwriting symbol font `largesymbols' in version `bold' 547 547 LaTeX Font Info: Overwriting symbol font `largesymbols' in version `bold'
(Font) OMX/txex/m/n --> OMX/txex/bx/n on input line 121. 548 548 (Font) OMX/txex/m/n --> OMX/txex/bx/n on input line 121.
\symlargesymbolsA=\mathgroup9 549 549 \symlargesymbolsA=\mathgroup9
LaTeX Font Info: Overwriting symbol font `largesymbolsA' in version `bold' 550 550 LaTeX Font Info: Overwriting symbol font `largesymbolsA' in version `bold'
(Font) U/txexa/m/n --> U/txexa/bx/n on input line 129. 551 551 (Font) U/txexa/m/n --> U/txexa/bx/n on input line 129.
LaTeX Font Info: Redeclaring math symbol \mathsterling on input line 164. 552 552 LaTeX Font Info: Redeclaring math symbol \mathsterling on input line 164.
LaTeX Font Info: Redeclaring math symbol \hbar on input line 591. 553 553 LaTeX Font Info: Redeclaring math symbol \hbar on input line 591.
LaTeX Info: Redefining \not on input line 1043. 554 554 LaTeX Info: Redefining \not on input line 1043.
LaTeX Info: Redefining \textsquare on input line 1063. 555 555 LaTeX Info: Redefining \textsquare on input line 1063.
LaTeX Info: Redefining \openbox on input line 1064. 556 556 LaTeX Info: Redefining \openbox on input line 1064.
) 557 557 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/relsize/relsize.sty 558 558 (/usr/local/texlive/2023/texmf-dist/tex/latex/relsize/relsize.sty
Package: relsize 2013/03/29 ver 4.1 559 559 Package: relsize 2013/03/29 ver 4.1
) 560 560 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/xkeyval/xkeyval.sty 561 561 (/usr/local/texlive/2023/texmf-dist/tex/latex/xkeyval/xkeyval.sty
Package: xkeyval 2022/06/16 v2.9 package option processing (HA) 562 562 Package: xkeyval 2022/06/16 v2.9 package option processing (HA)
563 563
(/usr/local/texlive/2023/texmf-dist/tex/generic/xkeyval/xkeyval.tex 564 564 (/usr/local/texlive/2023/texmf-dist/tex/generic/xkeyval/xkeyval.tex
(/usr/local/texlive/2023/texmf-dist/tex/generic/xkeyval/xkvutils.tex 565 565 (/usr/local/texlive/2023/texmf-dist/tex/generic/xkeyval/xkvutils.tex
\XKV@toks=\toks40 566 566 \XKV@toks=\toks40
\XKV@tempa@toks=\toks41 567 567 \XKV@tempa@toks=\toks41
) 568 568 )
\XKV@depth=\count309 569 569 \XKV@depth=\count309
File: xkeyval.tex 2014/12/03 v2.7a key=value parser (HA) 570 570 File: xkeyval.tex 2014/12/03 v2.7a key=value parser (HA)
)) 571 571 ))
(/usr/local/texlive/2023/texmf-dist/tex/latex/hyphenat/hyphenat.sty 572 572 (/usr/local/texlive/2023/texmf-dist/tex/latex/hyphenat/hyphenat.sty
Package: hyphenat 2009/09/02 v2.3c hyphenation utilities 573 573 Package: hyphenat 2009/09/02 v2.3c hyphenation utilities
\langwohyphens=\language88 574 574 \langwohyphens=\language88
LaTeX Info: Redefining \_ on input line 43. 575 575 LaTeX Info: Redefining \_ on input line 43.
) 576 576 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/bbm-macros/bbm.sty 577 577 (/usr/local/texlive/2023/texmf-dist/tex/latex/bbm-macros/bbm.sty
Package: bbm 1999/03/15 V 1.2 provides fonts for set symbols - TH 578 578 Package: bbm 1999/03/15 V 1.2 provides fonts for set symbols - TH
LaTeX Font Info: Overwriting math alphabet `\mathbbm' in version `bold' 579 579 LaTeX Font Info: Overwriting math alphabet `\mathbbm' in version `bold'
(Font) U/bbm/m/n --> U/bbm/bx/n on input line 33. 580 580 (Font) U/bbm/m/n --> U/bbm/bx/n on input line 33.
LaTeX Font Info: Overwriting math alphabet `\mathbbmss' in version `bold' 581 581 LaTeX Font Info: Overwriting math alphabet `\mathbbmss' in version `bold'
(Font) U/bbmss/m/n --> U/bbmss/bx/n on input line 35. 582 582 (Font) U/bbmss/m/n --> U/bbmss/bx/n on input line 35.
) 583 583 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/environ/environ.sty 584 584 (/usr/local/texlive/2023/texmf-dist/tex/latex/environ/environ.sty
Package: environ 2014/05/04 v0.3 A new way to define environments 585 585 Package: environ 2014/05/04 v0.3 A new way to define environments
586 586
(/usr/local/texlive/2023/texmf-dist/tex/latex/trimspaces/trimspaces.sty 587 587 (/usr/local/texlive/2023/texmf-dist/tex/latex/trimspaces/trimspaces.sty
Package: trimspaces 2009/09/17 v1.1 Trim spaces around a token list 588 588 Package: trimspaces 2009/09/17 v1.1 Trim spaces around a token list
)) 589 589 ))
\c@upm@subfigure@count=\count310 590 590 \c@upm@subfigure@count=\count310
\c@upm@fmt@mtabular@columnnumber=\count311 591 591 \c@upm@fmt@mtabular@columnnumber=\count311
\c@upm@format@section@sectionlevel=\count312 592 592 \c@upm@format@section@sectionlevel=\count312
\c@upm@fmt@savedcounter=\count313 593 593 \c@upm@fmt@savedcounter=\count313
\c@@@upm@fmt@inlineenumeration=\count314 594 594 \c@@@upm@fmt@inlineenumeration=\count314
\c@@upm@fmt@enumdescription@cnt@=\count315 595 595 \c@@upm@fmt@enumdescription@cnt@=\count315
\upm@framed@minipage=\box113 596 596 \upm@framed@minipage=\box113
\upm@highlight@box@save=\box114 597 597 \upm@highlight@box@save=\box114
\c@upmdefinition=\count316 598 598 \c@upmdefinition=\count316
) 599 599 )
(./upmethodology-version.sty 600 600 (./upmethodology-version.sty
Package: upmethodology-version 2013/08/26 601 601 Package: upmethodology-version 2013/08/26
602 602
**** upmethodology-version is using French language **** 603 603 **** upmethodology-version is using French language ****
\upm@tmp@a=\count317 604 604 \upm@tmp@a=\count317
) 605 605 )
\listendskip=\skip62 606 606 \listendskip=\skip62
) 607 607 )
(./upmethodology-frontpage.sty 608 608 (./upmethodology-frontpage.sty
Package: upmethodology-frontpage 2015/06/26 609 609 Package: upmethodology-frontpage 2015/06/26
610 610
**** upmethodology-frontpage is using French language **** 611 611 **** upmethodology-frontpage is using French language ****
\upm@front@tmpa=\dimen256 612 612 \upm@front@tmpa=\dimen256
\upm@front@tmpb=\dimen257 613 613 \upm@front@tmpb=\dimen257
614 614
*** define extension value frontillustrationsize ****) 615 615 *** define extension value frontillustrationsize ****)
(./upmethodology-backpage.sty 616 616 (./upmethodology-backpage.sty
Package: upmethodology-backpage 2013/12/14 617 617 Package: upmethodology-backpage 2013/12/14
618 618
**** upmethodology-backpage is using French language ****) 619 619 **** upmethodology-backpage is using French language ****)
(/usr/local/texlive/2023/texmf-dist/tex/latex/url/url.sty 620 620 (/usr/local/texlive/2023/texmf-dist/tex/latex/url/url.sty
\Urlmuskip=\muskip17 621 621 \Urlmuskip=\muskip17
Package: url 2013/09/16 ver 3.4 Verb mode for urls, etc. 622 622 Package: url 2013/09/16 ver 3.4 Verb mode for urls, etc.
) 623 623 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/hyperref/hyperref.sty 624 624 (/usr/local/texlive/2023/texmf-dist/tex/latex/hyperref/hyperref.sty
Package: hyperref 2023-05-16 v7.00y Hypertext links for LaTeX 625 625 Package: hyperref 2023-05-16 v7.00y Hypertext links for LaTeX
626 626
(/usr/local/texlive/2023/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty 627 627 (/usr/local/texlive/2023/texmf-dist/tex/generic/ltxcmds/ltxcmds.sty
Package: ltxcmds 2020-05-10 v1.25 LaTeX kernel commands for general use (HO) 628 628 Package: ltxcmds 2020-05-10 v1.25 LaTeX kernel commands for general use (HO)
) 629 629 )
(/usr/local/texlive/2023/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty 630 630 (/usr/local/texlive/2023/texmf-dist/tex/generic/pdftexcmds/pdftexcmds.sty
Package: pdftexcmds 2020-06-27 v0.33 Utility functions of pdfTeX for LuaTeX (HO 631 631 Package: pdftexcmds 2020-06-27 v0.33 Utility functions of pdfTeX for LuaTeX (HO
) 632 632 )
633 633
(/usr/local/texlive/2023/texmf-dist/tex/generic/infwarerr/infwarerr.sty 634 634 (/usr/local/texlive/2023/texmf-dist/tex/generic/infwarerr/infwarerr.sty
Package: infwarerr 2019/12/03 v1.5 Providing info/warning/error messages (HO) 635 635 Package: infwarerr 2019/12/03 v1.5 Providing info/warning/error messages (HO)
) 636 636 )
Package pdftexcmds Info: \pdf@primitive is available. 637 637 Package pdftexcmds Info: \pdf@primitive is available.
Package pdftexcmds Info: \pdf@ifprimitive is available. 638 638 Package pdftexcmds Info: \pdf@ifprimitive is available.
Package pdftexcmds Info: \pdfdraftmode found. 639 639 Package pdftexcmds Info: \pdfdraftmode found.
) 640 640 )
(/usr/local/texlive/2023/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty 641 641 (/usr/local/texlive/2023/texmf-dist/tex/generic/kvdefinekeys/kvdefinekeys.sty
Package: kvdefinekeys 2019-12-19 v1.6 Define keys (HO) 642 642 Package: kvdefinekeys 2019-12-19 v1.6 Define keys (HO)
) 643 643 )
(/usr/local/texlive/2023/texmf-dist/tex/generic/pdfescape/pdfescape.sty 644 644 (/usr/local/texlive/2023/texmf-dist/tex/generic/pdfescape/pdfescape.sty
Package: pdfescape 2019/12/09 v1.15 Implements pdfTeX's escape features (HO) 645 645 Package: pdfescape 2019/12/09 v1.15 Implements pdfTeX's escape features (HO)
) 646 646 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/hycolor/hycolor.sty 647 647 (/usr/local/texlive/2023/texmf-dist/tex/latex/hycolor/hycolor.sty
Package: hycolor 2020-01-27 v1.10 Color options for hyperref/bookmark (HO) 648 648 Package: hycolor 2020-01-27 v1.10 Color options for hyperref/bookmark (HO)
) 649 649 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty 650 650 (/usr/local/texlive/2023/texmf-dist/tex/latex/letltxmacro/letltxmacro.sty
Package: letltxmacro 2019/12/03 v1.6 Let assignment for LaTeX macros (HO) 651 651 Package: letltxmacro 2019/12/03 v1.6 Let assignment for LaTeX macros (HO)
) 652 652 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/auxhook/auxhook.sty 653 653 (/usr/local/texlive/2023/texmf-dist/tex/latex/auxhook/auxhook.sty
Package: auxhook 2019-12-17 v1.6 Hooks for auxiliary files (HO) 654 654 Package: auxhook 2019-12-17 v1.6 Hooks for auxiliary files (HO)
) 655 655 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/hyperref/nameref.sty 656 656 (/usr/local/texlive/2023/texmf-dist/tex/latex/hyperref/nameref.sty
Package: nameref 2023-05-16 v2.51 Cross-referencing by name of section 657 657 Package: nameref 2023-05-16 v2.51 Cross-referencing by name of section
658 658
(/usr/local/texlive/2023/texmf-dist/tex/latex/refcount/refcount.sty 659 659 (/usr/local/texlive/2023/texmf-dist/tex/latex/refcount/refcount.sty
Package: refcount 2019/12/15 v3.6 Data extraction from label references (HO) 660 660 Package: refcount 2019/12/15 v3.6 Data extraction from label references (HO)
) 661 661 )
(/usr/local/texlive/2023/texmf-dist/tex/generic/gettitlestring/gettitlestring.s 662 662 (/usr/local/texlive/2023/texmf-dist/tex/generic/gettitlestring/gettitlestring.s
ty 663 663 ty
Package: gettitlestring 2019/12/15 v1.6 Cleanup title references (HO) 664 664 Package: gettitlestring 2019/12/15 v1.6 Cleanup title references (HO)
(/usr/local/texlive/2023/texmf-dist/tex/latex/kvoptions/kvoptions.sty 665 665 (/usr/local/texlive/2023/texmf-dist/tex/latex/kvoptions/kvoptions.sty
Package: kvoptions 2022-06-15 v3.15 Key value format for package options (HO) 666 666 Package: kvoptions 2022-06-15 v3.15 Key value format for package options (HO)
)) 667 667 ))
\c@section@level=\count318 668 668 \c@section@level=\count318
) 669 669 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/etoolbox/etoolbox.sty 670 670 (/usr/local/texlive/2023/texmf-dist/tex/latex/etoolbox/etoolbox.sty
Package: etoolbox 2020/10/05 v2.5k e-TeX tools for LaTeX (JAW) 671 671 Package: etoolbox 2020/10/05 v2.5k e-TeX tools for LaTeX (JAW)
\etb@tempcnta=\count319 672 672 \etb@tempcnta=\count319
) 673 673 )
\@linkdim=\dimen258 674 674 \@linkdim=\dimen258
\Hy@linkcounter=\count320 675 675 \Hy@linkcounter=\count320
\Hy@pagecounter=\count321 676 676 \Hy@pagecounter=\count321
677 677
(/usr/local/texlive/2023/texmf-dist/tex/latex/hyperref/pd1enc.def 678 678 (/usr/local/texlive/2023/texmf-dist/tex/latex/hyperref/pd1enc.def
File: pd1enc.def 2023-05-16 v7.00y Hyperref: PDFDocEncoding definition (HO) 679 679 File: pd1enc.def 2023-05-16 v7.00y Hyperref: PDFDocEncoding definition (HO)
Now handling font encoding PD1 ... 680 680 Now handling font encoding PD1 ...
... no UTF-8 mapping file for font encoding PD1 681 681 ... no UTF-8 mapping file for font encoding PD1
) 682 682 )
(/usr/local/texlive/2023/texmf-dist/tex/generic/intcalc/intcalc.sty 683 683 (/usr/local/texlive/2023/texmf-dist/tex/generic/intcalc/intcalc.sty
Package: intcalc 2019/12/15 v1.3 Expandable calculations with integers (HO) 684 684 Package: intcalc 2019/12/15 v1.3 Expandable calculations with integers (HO)
) 685 685 )
\Hy@SavedSpaceFactor=\count322 686 686 \Hy@SavedSpaceFactor=\count322
687 687
(/usr/local/texlive/2023/texmf-dist/tex/latex/hyperref/puenc.def 688 688 (/usr/local/texlive/2023/texmf-dist/tex/latex/hyperref/puenc.def
File: puenc.def 2023-05-16 v7.00y Hyperref: PDF Unicode definition (HO) 689 689 File: puenc.def 2023-05-16 v7.00y Hyperref: PDF Unicode definition (HO)
Now handling font encoding PU ... 690 690 Now handling font encoding PU ...
... no UTF-8 mapping file for font encoding PU 691 691 ... no UTF-8 mapping file for font encoding PU
) 692 692 )
Package hyperref Info: Option `breaklinks' set `true' on input line 4050. 693 693 Package hyperref Info: Option `breaklinks' set `true' on input line 4050.
Package hyperref Info: Option `pageanchor' set `true' on input line 4050. 694 694 Package hyperref Info: Option `pageanchor' set `true' on input line 4050.
Package hyperref Info: Option `bookmarks' set `false' on input line 4050. 695 695 Package hyperref Info: Option `bookmarks' set `false' on input line 4050.
Package hyperref Info: Option `hyperfigures' set `true' on input line 4050. 696 696 Package hyperref Info: Option `hyperfigures' set `true' on input line 4050.
Package hyperref Info: Option `hyperindex' set `true' on input line 4050. 697 697 Package hyperref Info: Option `hyperindex' set `true' on input line 4050.
Package hyperref Info: Option `linktocpage' set `true' on input line 4050. 698 698 Package hyperref Info: Option `linktocpage' set `true' on input line 4050.
Package hyperref Info: Option `bookmarks' set `true' on input line 4050. 699 699 Package hyperref Info: Option `bookmarks' set `true' on input line 4050.
Package hyperref Info: Option `bookmarksopen' set `true' on input line 4050. 700 700 Package hyperref Info: Option `bookmarksopen' set `true' on input line 4050.
Package hyperref Info: Option `bookmarksnumbered' set `true' on input line 4050 701 701 Package hyperref Info: Option `bookmarksnumbered' set `true' on input line 4050
. 702 702 .
Package hyperref Info: Option `colorlinks' set `false' on input line 4050. 703 703 Package hyperref Info: Option `colorlinks' set `false' on input line 4050.
Package hyperref Info: Hyper figures ON on input line 4165. 704 704 Package hyperref Info: Hyper figures ON on input line 4165.
Package hyperref Info: Link nesting OFF on input line 4172. 705 705 Package hyperref Info: Link nesting OFF on input line 4172.
Package hyperref Info: Hyper index ON on input line 4175. 706 706 Package hyperref Info: Hyper index ON on input line 4175.
Package hyperref Info: Plain pages OFF on input line 4182. 707 707 Package hyperref Info: Plain pages OFF on input line 4182.
Package hyperref Info: Backreferencing OFF on input line 4187. 708 708 Package hyperref Info: Backreferencing OFF on input line 4187.
Package hyperref Info: Implicit mode ON; LaTeX internals redefined. 709 709 Package hyperref Info: Implicit mode ON; LaTeX internals redefined.
Package hyperref Info: Bookmarks ON on input line 4434. 710 710 Package hyperref Info: Bookmarks ON on input line 4434.
LaTeX Info: Redefining \href on input line 4683. 711 711 LaTeX Info: Redefining \href on input line 4683.
\c@Hy@tempcnt=\count323 712 712 \c@Hy@tempcnt=\count323
LaTeX Info: Redefining \url on input line 4772. 713 713 LaTeX Info: Redefining \url on input line 4772.
\XeTeXLinkMargin=\dimen259 714 714 \XeTeXLinkMargin=\dimen259
715 715
(/usr/local/texlive/2023/texmf-dist/tex/generic/bitset/bitset.sty 716 716 (/usr/local/texlive/2023/texmf-dist/tex/generic/bitset/bitset.sty
Package: bitset 2019/12/09 v1.3 Handle bit-vector datatype (HO) 717 717 Package: bitset 2019/12/09 v1.3 Handle bit-vector datatype (HO)
718 718
(/usr/local/texlive/2023/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty 719 719 (/usr/local/texlive/2023/texmf-dist/tex/generic/bigintcalc/bigintcalc.sty
Package: bigintcalc 2019/12/15 v1.5 Expandable calculations on big integers (HO 720 720 Package: bigintcalc 2019/12/15 v1.5 Expandable calculations on big integers (HO
) 721 721 )
)) 722 722 ))
\Fld@menulength=\count324 723 723 \Fld@menulength=\count324
\Field@Width=\dimen260 724 724 \Field@Width=\dimen260
\Fld@charsize=\dimen261 725 725 \Fld@charsize=\dimen261
Package hyperref Info: Hyper figures ON on input line 6049. 726 726 Package hyperref Info: Hyper figures ON on input line 6049.
Package hyperref Info: Link nesting OFF on input line 6056. 727 727 Package hyperref Info: Link nesting OFF on input line 6056.
Package hyperref Info: Hyper index ON on input line 6059. 728 728 Package hyperref Info: Hyper index ON on input line 6059.
Package hyperref Info: backreferencing OFF on input line 6066. 729 729 Package hyperref Info: backreferencing OFF on input line 6066.
Package hyperref Info: Link coloring OFF on input line 6071. 730 730 Package hyperref Info: Link coloring OFF on input line 6071.
Package hyperref Info: Link coloring with OCG OFF on input line 6076. 731 731 Package hyperref Info: Link coloring with OCG OFF on input line 6076.
Package hyperref Info: PDF/A mode OFF on input line 6081. 732 732 Package hyperref Info: PDF/A mode OFF on input line 6081.
733 733
(/usr/local/texlive/2023/texmf-dist/tex/latex/base/atbegshi-ltx.sty 734 734 (/usr/local/texlive/2023/texmf-dist/tex/latex/base/atbegshi-ltx.sty
Package: atbegshi-ltx 2021/01/10 v1.0c Emulation of the original atbegshi 735 735 Package: atbegshi-ltx 2021/01/10 v1.0c Emulation of the original atbegshi
package with kernel methods 736 736 package with kernel methods
) 737 737 )
\Hy@abspage=\count325 738 738 \Hy@abspage=\count325
\c@Item=\count326 739 739 \c@Item=\count326
\c@Hfootnote=\count327 740 740 \c@Hfootnote=\count327
) 741 741 )
Package hyperref Info: Driver: hpdftex. 742 742 Package hyperref Info: Driver: hpdftex.
743 743
(/usr/local/texlive/2023/texmf-dist/tex/latex/hyperref/hpdftex.def 744 744 (/usr/local/texlive/2023/texmf-dist/tex/latex/hyperref/hpdftex.def
File: hpdftex.def 2023-05-16 v7.00y Hyperref driver for pdfTeX 745 745 File: hpdftex.def 2023-05-16 v7.00y Hyperref driver for pdfTeX
746 746
(/usr/local/texlive/2023/texmf-dist/tex/latex/base/atveryend-ltx.sty 747 747 (/usr/local/texlive/2023/texmf-dist/tex/latex/base/atveryend-ltx.sty
Package: atveryend-ltx 2020/08/19 v1.0a Emulation of the original atveryend pac 748 748 Package: atveryend-ltx 2020/08/19 v1.0a Emulation of the original atveryend pac
kage 749 749 kage
with kernel methods 750 750 with kernel methods
) 751 751 )
\Fld@listcount=\count328 752 752 \Fld@listcount=\count328
\c@bookmark@seq@number=\count329 753 753 \c@bookmark@seq@number=\count329
754 754
(/usr/local/texlive/2023/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty 755 755 (/usr/local/texlive/2023/texmf-dist/tex/latex/rerunfilecheck/rerunfilecheck.sty
Package: rerunfilecheck 2022-07-10 v1.10 Rerun checks for auxiliary files (HO) 756 756 Package: rerunfilecheck 2022-07-10 v1.10 Rerun checks for auxiliary files (HO)
757 757
(/usr/local/texlive/2023/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty 758 758 (/usr/local/texlive/2023/texmf-dist/tex/generic/uniquecounter/uniquecounter.sty
Package: uniquecounter 2019/12/15 v1.4 Provide unlimited unique counter (HO) 759 759 Package: uniquecounter 2019/12/15 v1.4 Provide unlimited unique counter (HO)
) 760 760 )
Package uniquecounter Info: New unique counter `rerunfilecheck' on input line 2 761 761 Package uniquecounter Info: New unique counter `rerunfilecheck' on input line 2
85. 762 762 85.
) 763 763 )
\Hy@SectionHShift=\skip63 764 764 \Hy@SectionHShift=\skip63
) 765 765 )
\upm@smalllogo@height=\dimen262 766 766 \upm@smalllogo@height=\dimen262
) (./spimbasephdthesis.sty 767 767 ) (./spimbasephdthesis.sty
Package: spimbasephdthesis 2015/09/01 768 768 Package: spimbasephdthesis 2015/09/01
769 769
(/usr/local/texlive/2023/texmf-dist/tex/latex/lettrine/lettrine.sty 770 770 (/usr/local/texlive/2023/texmf-dist/tex/latex/lettrine/lettrine.sty
File: lettrine.sty 2023-04-18 v2.40 (Daniel Flipo) 771 771 File: lettrine.sty 2023-04-18 v2.40 (Daniel Flipo)
772 772
(/usr/local/texlive/2023/texmf-dist/tex/latex/l3packages/xfp/xfp.sty 773 773 (/usr/local/texlive/2023/texmf-dist/tex/latex/l3packages/xfp/xfp.sty
(/usr/local/texlive/2023/texmf-dist/tex/latex/l3kernel/expl3.sty 774 774 (/usr/local/texlive/2023/texmf-dist/tex/latex/l3kernel/expl3.sty
Package: expl3 2023-05-22 L3 programming layer (loader) 775 775 Package: expl3 2023-05-22 L3 programming layer (loader)
776 776
(/usr/local/texlive/2023/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def 777 777 (/usr/local/texlive/2023/texmf-dist/tex/latex/l3backend/l3backend-pdftex.def
File: l3backend-pdftex.def 2023-04-19 L3 backend support: PDF output (pdfTeX) 778 778 File: l3backend-pdftex.def 2023-04-19 L3 backend support: PDF output (pdfTeX)
\l__color_backend_stack_int=\count330 779 779 \l__color_backend_stack_int=\count330
\l__pdf_internal_box=\box115 780 780 \l__pdf_internal_box=\box115
)) 781 781 ))
Package: xfp 2023-02-02 L3 Floating point unit 782 782 Package: xfp 2023-02-02 L3 Floating point unit
) 783 783 )
\c@DefaultLines=\count331 784 784 \c@DefaultLines=\count331
\c@DefaultDepth=\count332 785 785 \c@DefaultDepth=\count332
\DefaultFindent=\dimen263 786 786 \DefaultFindent=\dimen263
\DefaultNindent=\dimen264 787 787 \DefaultNindent=\dimen264
\DefaultSlope=\dimen265 788 788 \DefaultSlope=\dimen265
\DiscardVskip=\dimen266 789 789 \DiscardVskip=\dimen266
\L@lbox=\box116 790 790 \L@lbox=\box116
\L@tbox=\box117 791 791 \L@tbox=\box117
\c@L@lines=\count333 792 792 \c@L@lines=\count333
\c@L@depth=\count334 793 793 \c@L@depth=\count334
\L@Pindent=\dimen267 794 794 \L@Pindent=\dimen267
\L@Findent=\dimen268 795 795 \L@Findent=\dimen268
\L@Nindent=\dimen269 796 796 \L@Nindent=\dimen269
\L@lraise=\dimen270 797 797 \L@lraise=\dimen270
\L@first=\dimen271 798 798 \L@first=\dimen271
\L@next=\dimen272 799 799 \L@next=\dimen272
\L@slope=\dimen273 800 800 \L@slope=\dimen273
\L@height=\dimen274 801 801 \L@height=\dimen274
\L@novskip=\dimen275 802 802 \L@novskip=\dimen275
\L@target@ht=\dimen276 803 803 \L@target@ht=\dimen276
\L@target@dp=\dimen277 804 804 \L@target@dp=\dimen277
\L@target@tht=\dimen278 805 805 \L@target@tht=\dimen278
\LettrineWidth=\dimen279 806 806 \LettrineWidth=\dimen279
\LettrineHeight=\dimen280 807 807 \LettrineHeight=\dimen280
\LettrineDepth=\dimen281 808 808 \LettrineDepth=\dimen281
Loading lettrine.cfg 809 809 Loading lettrine.cfg
(/usr/local/texlive/2023/texmf-dist/tex/latex/lettrine/lettrine.cfg) 810 810 (/usr/local/texlive/2023/texmf-dist/tex/latex/lettrine/lettrine.cfg)
\Llist@everypar=\toks42 811 811 \Llist@everypar=\toks42
) 812 812 )
*** define extension value backcovermessage ****) 813 813 *** define extension value backcovermessage ****)
**** including upm extension spimufcphdthesis (upmext-spimufcphdthesis.cfg) *** 814 814 **** including upm extension spimufcphdthesis (upmext-spimufcphdthesis.cfg) ***
* (./upmext-spimufcphdthesis.cfg *** define extension value copyright **** 815 815 * (./upmext-spimufcphdthesis.cfg *** define extension value copyright ****
*** style extension spimufcphdthesis, Copyright {(c)} 2012--14 Dr. St\unhbox \v 816 816 *** style extension spimufcphdthesis, Copyright {(c)} 2012--14 Dr. St\unhbox \v
oidb@x \bgroup \let \unhbox \voidb@x \setbox \@tempboxa \hbox {e\global \mathch 817 817 oidb@x \bgroup \let \unhbox \voidb@x \setbox \@tempboxa \hbox {e\global \mathch
ardef \accent@spacefactor \spacefactor }\let \begingroup \let \typeout \protect 818 818 ardef \accent@spacefactor \spacefactor }\let \begingroup \let \typeout \protect
\begingroup \def \MessageBreak { 819 819 \begingroup \def \MessageBreak {
(Font) }\let \protect \immediate\write \m@ne {LaTeX Font Info: 820 820 (Font) }\let \protect \immediate\write \m@ne {LaTeX Font Info:
on input line 5.}\endgroup \endgroup \relax \let \ignorespaces \relax \accent 821 821 on input line 5.}\endgroup \endgroup \relax \let \ignorespaces \relax \accent
19 e\egroup \spacefactor \accent@spacefactor phane GALLAND. **** 822 822 19 e\egroup \spacefactor \accent@spacefactor phane GALLAND. ****
*** define extension value trademarks **** 823 823 *** define extension value trademarks ****
(/usr/local/texlive/2023/texmf-dist/tex/latex/psnfss/helvet.sty 824 824 (/usr/local/texlive/2023/texmf-dist/tex/latex/psnfss/helvet.sty
Package: helvet 2020/03/25 PSNFSS-v9.3 (WaS) 825 825 Package: helvet 2020/03/25 PSNFSS-v9.3 (WaS)
) 826 826 )
*** define extension value frontillustration **** 827 827 *** define extension value frontillustration ****
*** define extension value p3illustration **** 828 828 *** define extension value p3illustration ****
*** define extension value backillustration **** 829 829 *** define extension value backillustration ****
*** define extension value watermarksize **** 830 830 *** define extension value watermarksize ****
*** define extension value universityname **** 831 831 *** define extension value universityname ****
*** define extension value speciality **** 832 832 *** define extension value speciality ****
*** define extension value defensedate **** 833 833 *** define extension value defensedate ****
*** define extension value jurytabwidth **** 834 834 *** define extension value jurytabwidth ****
*** define extension value jurystyle **** 835 835 *** define extension value jurystyle ****
*** define extension value defensemessage ****)) 836 836 *** define extension value defensemessage ****))
(/usr/local/texlive/2023/texmf-dist/tex/latex/base/inputenc.sty 837 837 (/usr/local/texlive/2023/texmf-dist/tex/latex/base/inputenc.sty
Package: inputenc 2021/02/14 v1.3d Input encoding file 838 838 Package: inputenc 2021/02/14 v1.3d Input encoding file
\inpenc@prehook=\toks43 839 839 \inpenc@prehook=\toks43
\inpenc@posthook=\toks44 840 840 \inpenc@posthook=\toks44
) 841 841 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/base/fontenc.sty 842 842 (/usr/local/texlive/2023/texmf-dist/tex/latex/base/fontenc.sty
Package: fontenc 2021/04/29 v2.0v Standard LaTeX package 843 843 Package: fontenc 2021/04/29 v2.0v Standard LaTeX package
LaTeX Font Info: Trying to load font information for T1+phv on input line 11 844 844 LaTeX Font Info: Trying to load font information for T1+phv on input line 11
2. 845 845 2.
846 846
(/usr/local/texlive/2023/texmf-dist/tex/latex/psnfss/t1phv.fd 847 847 (/usr/local/texlive/2023/texmf-dist/tex/latex/psnfss/t1phv.fd
File: t1phv.fd 2020/03/25 scalable font definitions for T1/phv. 848 848 File: t1phv.fd 2020/03/25 scalable font definitions for T1/phv.
)) 849 849 ))
(/usr/local/texlive/2023/texmf-dist/tex/latex/psnfss/times.sty 850 850 (/usr/local/texlive/2023/texmf-dist/tex/latex/psnfss/times.sty
Package: times 2020/03/25 PSNFSS-v9.3 (SPQR) 851 851 Package: times 2020/03/25 PSNFSS-v9.3 (SPQR)
) 852 852 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/adjustbox/adjustbox.sty 853 853 (/usr/local/texlive/2023/texmf-dist/tex/latex/adjustbox/adjustbox.sty
Package: adjustbox 2022/10/17 v1.3a Adjusting TeX boxes (trim, clip, ...) 854 854 Package: adjustbox 2022/10/17 v1.3a Adjusting TeX boxes (trim, clip, ...)
855 855
(/usr/local/texlive/2023/texmf-dist/tex/latex/adjustbox/adjcalc.sty 856 856 (/usr/local/texlive/2023/texmf-dist/tex/latex/adjustbox/adjcalc.sty
Package: adjcalc 2012/05/16 v1.1 Provides advanced setlength with multiple back 857 857 Package: adjcalc 2012/05/16 v1.1 Provides advanced setlength with multiple back
-ends (calc, etex, pgfmath) 858 858 -ends (calc, etex, pgfmath)
) 859 859 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/adjustbox/trimclip.sty 860 860 (/usr/local/texlive/2023/texmf-dist/tex/latex/adjustbox/trimclip.sty
Package: trimclip 2020/08/19 v1.2 Trim and clip general TeX material 861 861 Package: trimclip 2020/08/19 v1.2 Trim and clip general TeX material
862 862
(/usr/local/texlive/2023/texmf-dist/tex/latex/collectbox/collectbox.sty 863 863 (/usr/local/texlive/2023/texmf-dist/tex/latex/collectbox/collectbox.sty
Package: collectbox 2022/10/17 v0.4c Collect macro arguments as boxes 864 864 Package: collectbox 2022/10/17 v0.4c Collect macro arguments as boxes
\collectedbox=\box118 865 865 \collectedbox=\box118
) 866 866 )
\tc@llx=\dimen282 867 867 \tc@llx=\dimen282
\tc@lly=\dimen283 868 868 \tc@lly=\dimen283
\tc@urx=\dimen284 869 869 \tc@urx=\dimen284
\tc@ury=\dimen285 870 870 \tc@ury=\dimen285
Package trimclip Info: Using driver 'tc-pdftex.def'. 871 871 Package trimclip Info: Using driver 'tc-pdftex.def'.
872 872
(/usr/local/texlive/2023/texmf-dist/tex/latex/adjustbox/tc-pdftex.def 873 873 (/usr/local/texlive/2023/texmf-dist/tex/latex/adjustbox/tc-pdftex.def
File: tc-pdftex.def 2019/01/04 v2.2 Clipping driver for pdftex 874 874 File: tc-pdftex.def 2019/01/04 v2.2 Clipping driver for pdftex
)) 875 875 ))
\adjbox@Width=\dimen286 876 876 \adjbox@Width=\dimen286
\adjbox@Height=\dimen287 877 877 \adjbox@Height=\dimen287
\adjbox@Depth=\dimen288 878 878 \adjbox@Depth=\dimen288
\adjbox@Totalheight=\dimen289 879 879 \adjbox@Totalheight=\dimen289
\adjbox@pwidth=\dimen290 880 880 \adjbox@pwidth=\dimen290
\adjbox@pheight=\dimen291 881 881 \adjbox@pheight=\dimen291
\adjbox@pdepth=\dimen292 882 882 \adjbox@pdepth=\dimen292
\adjbox@ptotalheight=\dimen293 883 883 \adjbox@ptotalheight=\dimen293
884 884
(/usr/local/texlive/2023/texmf-dist/tex/latex/ifoddpage/ifoddpage.sty 885 885 (/usr/local/texlive/2023/texmf-dist/tex/latex/ifoddpage/ifoddpage.sty
Package: ifoddpage 2022/10/18 v1.2 Conditionals for odd/even page detection 886 886 Package: ifoddpage 2022/10/18 v1.2 Conditionals for odd/even page detection
\c@checkoddpage=\count335 887 887 \c@checkoddpage=\count335
) 888 888 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/varwidth/varwidth.sty 889 889 (/usr/local/texlive/2023/texmf-dist/tex/latex/varwidth/varwidth.sty
Package: varwidth 2009/03/30 ver 0.92; Variable-width minipages 890 890 Package: varwidth 2009/03/30 ver 0.92; Variable-width minipages
\@vwid@box=\box119 891 891 \@vwid@box=\box119
\sift@deathcycles=\count336 892 892 \sift@deathcycles=\count336
\@vwid@loff=\dimen294 893 893 \@vwid@loff=\dimen294
\@vwid@roff=\dimen295 894 894 \@vwid@roff=\dimen295
)) 895 895 ))
(/usr/local/texlive/2023/texmf-dist/tex/latex/algorithms/algorithm.sty 896 896 (/usr/local/texlive/2023/texmf-dist/tex/latex/algorithms/algorithm.sty
Package: algorithm 2009/08/24 v0.1 Document Style `algorithm' - floating enviro 897 897 Package: algorithm 2009/08/24 v0.1 Document Style `algorithm' - floating enviro
nment 898 898 nment
899 899
(/usr/local/texlive/2023/texmf-dist/tex/latex/float/float.sty 900 900 (/usr/local/texlive/2023/texmf-dist/tex/latex/float/float.sty
Package: float 2001/11/08 v1.3d Float enhancements (AL) 901 901 Package: float 2001/11/08 v1.3d Float enhancements (AL)
\c@float@type=\count337 902 902 \c@float@type=\count337
\float@exts=\toks45 903 903 \float@exts=\toks45
\float@box=\box120 904 904 \float@box=\box120
\@float@everytoks=\toks46 905 905 \@float@everytoks=\toks46
\@floatcapt=\box121 906 906 \@floatcapt=\box121
) 907 907 )
\@float@every@algorithm=\toks47 908 908 \@float@every@algorithm=\toks47
\c@algorithm=\count338 909 909 \c@algorithm=\count338
) 910 910 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/algorithmicx/algpseudocode.sty 911 911 (/usr/local/texlive/2023/texmf-dist/tex/latex/algorithmicx/algpseudocode.sty
Package: algpseudocode 912 912 Package: algpseudocode
913 913
(/usr/local/texlive/2023/texmf-dist/tex/latex/algorithmicx/algorithmicx.sty 914 914 (/usr/local/texlive/2023/texmf-dist/tex/latex/algorithmicx/algorithmicx.sty
Package: algorithmicx 2005/04/27 v1.2 Algorithmicx 915 915 Package: algorithmicx 2005/04/27 v1.2 Algorithmicx
916 916
Document Style algorithmicx 1.2 - a greatly improved `algorithmic' style 917 917 Document Style algorithmicx 1.2 - a greatly improved `algorithmic' style
\c@ALG@line=\count339 918 918 \c@ALG@line=\count339
\c@ALG@rem=\count340 919 919 \c@ALG@rem=\count340
\c@ALG@nested=\count341 920 920 \c@ALG@nested=\count341
\ALG@tlm=\skip64 921 921 \ALG@tlm=\skip64
\ALG@thistlm=\skip65 922 922 \ALG@thistlm=\skip65
\c@ALG@Lnr=\count342 923 923 \c@ALG@Lnr=\count342
\c@ALG@blocknr=\count343 924 924 \c@ALG@blocknr=\count343
\c@ALG@storecount=\count344 925 925 \c@ALG@storecount=\count344
\c@ALG@tmpcounter=\count345 926 926 \c@ALG@tmpcounter=\count345
\ALG@tmplength=\skip66 927 927 \ALG@tmplength=\skip66
) 928 928 )
Document Style - pseudocode environments for use with the `algorithmicx' style 929 929 Document Style - pseudocode environments for use with the `algorithmicx' style
) *** define extension value defensedate **** 930 930 ) *** define extension value defensedate ****
(/usr/local/texlive/2023/texmf-dist/tex/latex/tools/layout.sty 931 931 (/usr/local/texlive/2023/texmf-dist/tex/latex/tools/layout.sty
Package: layout 2021-03-10 v1.2e Show layout parameters 932 932 Package: layout 2021-03-10 v1.2e Show layout parameters
\oneinch=\count346 933 933 \oneinch=\count346
\cnt@paperwidth=\count347 934 934 \cnt@paperwidth=\count347
\cnt@paperheight=\count348 935 935 \cnt@paperheight=\count348
\cnt@hoffset=\count349 936 936 \cnt@hoffset=\count349
\cnt@voffset=\count350 937 937 \cnt@voffset=\count350
\cnt@textheight=\count351 938 938 \cnt@textheight=\count351
\cnt@textwidth=\count352 939 939 \cnt@textwidth=\count352
\cnt@topmargin=\count353 940 940 \cnt@topmargin=\count353
\cnt@oddsidemargin=\count354 941 941 \cnt@oddsidemargin=\count354
\cnt@evensidemargin=\count355 942 942 \cnt@evensidemargin=\count355
\cnt@headheight=\count356 943 943 \cnt@headheight=\count356
\cnt@headsep=\count357 944 944 \cnt@headsep=\count357
\cnt@marginparsep=\count358 945 945 \cnt@marginparsep=\count358
\cnt@marginparwidth=\count359 946 946 \cnt@marginparwidth=\count359
\cnt@marginparpush=\count360 947 947 \cnt@marginparpush=\count360
\cnt@footskip=\count361 948 948 \cnt@footskip=\count361
\fheight=\count362 949 949 \fheight=\count362
\ref@top=\count363 950 950 \ref@top=\count363
\ref@hoffset=\count364 951 951 \ref@hoffset=\count364
\ref@voffset=\count365 952 952 \ref@voffset=\count365
\ref@head=\count366 953 953 \ref@head=\count366
\ref@body=\count367 954 954 \ref@body=\count367
\ref@foot=\count368 955 955 \ref@foot=\count368
\ref@margin=\count369 956 956 \ref@margin=\count369
\ref@marginwidth=\count370 957 957 \ref@marginwidth=\count370
\ref@marginpar=\count371 958 958 \ref@marginpar=\count371
\Interval=\count372 959 959 \Interval=\count372
\ExtraYPos=\count373 960 960 \ExtraYPos=\count373
\PositionX=\count374 961 961 \PositionX=\count374
\PositionY=\count375 962 962 \PositionY=\count375
\ArrowLength=\count376 963 963 \ArrowLength=\count376
) 964 964 )
(/usr/local/texlive/2023/texmf-dist/tex/latex/geometry/geometry.sty 965 965 (/usr/local/texlive/2023/texmf-dist/tex/latex/geometry/geometry.sty
Package: geometry 2020/01/02 v5.9 Page Geometry 966 966 Package: geometry 2020/01/02 v5.9 Page Geometry
967 967
(/usr/local/texlive/2023/texmf-dist/tex/generic/iftex/ifvtex.sty 968 968 (/usr/local/texlive/2023/texmf-dist/tex/generic/iftex/ifvtex.sty
Package: ifvtex 2019/10/25 v1.7 ifvtex legacy package. Use iftex instead. 969 969 Package: ifvtex 2019/10/25 v1.7 ifvtex legacy package. Use iftex instead.
) 970 970 )
\Gm@cnth=\count377 971 971 \Gm@cnth=\count377
\Gm@cntv=\count378 972 972 \Gm@cntv=\count378
\c@Gm@tempcnt=\count379 973 973 \c@Gm@tempcnt=\count379
\Gm@bindingoffset=\dimen296 974 974 \Gm@bindingoffset=\dimen296
\Gm@wd@mp=\dimen297 975 975 \Gm@wd@mp=\dimen297
\Gm@odd@mp=\dimen298 976 976 \Gm@odd@mp=\dimen298
\Gm@even@mp=\dimen299 977 977 \Gm@even@mp=\dimen299
\Gm@layoutwidth=\dimen300 978 978 \Gm@layoutwidth=\dimen300
\Gm@layoutheight=\dimen301 979 979 \Gm@layoutheight=\dimen301
\Gm@layouthoffset=\dimen302 980 980 \Gm@layouthoffset=\dimen302
\Gm@layoutvoffset=\dimen303 981 981 \Gm@layoutvoffset=\dimen303
\Gm@dimlist=\toks48 982 982 \Gm@dimlist=\toks48
) (./main.aux 983 983 ) (./main.aux
(./chapters/contexte2.aux 984 984 (./chapters/contexte2.aux
985 985
LaTeX Warning: Label `fig:figCycle' multiply defined. 986 986 LaTeX Warning: Label `fig:figCycle' multiply defined.
987 987
) (./chapters/EIAH.aux) (./chapters/CBR.aux) (./chapters/Architecture.aux) 988 988 ) (./chapters/EIAH.aux) (./chapters/CBR.aux) (./chapters/Architecture.aux)
(./chapters/TS.aux 989 989 (./chapters/TS.aux
990 990
LaTeX Warning: Label `eqBeta' multiply defined. 991 991 LaTeX Warning: Label `eqBeta' multiply defined.
992 992
)) 993 993 ))
\openout1 = `main.aux'. 994 994 \openout1 = `main.aux'.
995 995
LaTeX Font Info: Checking defaults for OML/txmi/m/it on input line 227. 996 996 LaTeX Font Info: Checking defaults for OML/txmi/m/it on input line 227.
LaTeX Font Info: Trying to load font information for OML+txmi on input line 997 997 LaTeX Font Info: Trying to load font information for OML+txmi on input line
227. 998 998 227.
(/usr/local/texlive/2023/texmf-dist/tex/latex/txfonts/omltxmi.fd 999 999 (/usr/local/texlive/2023/texmf-dist/tex/latex/txfonts/omltxmi.fd
File: omltxmi.fd 2000/12/15 v3.1 1000 1000 File: omltxmi.fd 2000/12/15 v3.1
) 1001 1001 )
LaTeX Font Info: ... okay on input line 227. 1002 1002 LaTeX Font Info: ... okay on input line 227.
LaTeX Font Info: Checking defaults for OMS/txsy/m/n on input line 227. 1003 1003 LaTeX Font Info: Checking defaults for OMS/txsy/m/n on input line 227.
LaTeX Font Info: Trying to load font information for OMS+txsy on input line 1004 1004 LaTeX Font Info: Trying to load font information for OMS+txsy on input line
227. 1005 1005 227.
1006 1006
(/usr/local/texlive/2023/texmf-dist/tex/latex/txfonts/omstxsy.fd 1007 1007 (/usr/local/texlive/2023/texmf-dist/tex/latex/txfonts/omstxsy.fd
File: omstxsy.fd 2000/12/15 v3.1 1008 1008 File: omstxsy.fd 2000/12/15 v3.1
) 1009 1009 )
LaTeX Font Info: ... okay on input line 227. 1010 1010 LaTeX Font Info: ... okay on input line 227.
LaTeX Font Info: Checking defaults for OT1/cmr/m/n on input line 227. 1011 1011 LaTeX Font Info: Checking defaults for OT1/cmr/m/n on input line 227.
LaTeX Font Info: ... okay on input line 227. 1012 1012 LaTeX Font Info: ... okay on input line 227.
LaTeX Font Info: Checking defaults for T1/cmr/m/n on input line 227. 1013 1013 LaTeX Font Info: Checking defaults for T1/cmr/m/n on input line 227.
LaTeX Font Info: ... okay on input line 227. 1014 1014 LaTeX Font Info: ... okay on input line 227.
LaTeX Font Info: Checking defaults for TS1/cmr/m/n on input line 227. 1015 1015 LaTeX Font Info: Checking defaults for TS1/cmr/m/n on input line 227.
LaTeX Font Info: ... okay on input line 227. 1016 1016 LaTeX Font Info: ... okay on input line 227.
LaTeX Font Info: Checking defaults for OMX/txex/m/n on input line 227. 1017 1017 LaTeX Font Info: Checking defaults for OMX/txex/m/n on input line 227.
LaTeX Font Info: Trying to load font information for OMX+txex on input line 1018 1018 LaTeX Font Info: Trying to load font information for OMX+txex on input line
227. 1019 1019 227.
1020 1020
(/usr/local/texlive/2023/texmf-dist/tex/latex/txfonts/omxtxex.fd 1021 1021 (/usr/local/texlive/2023/texmf-dist/tex/latex/txfonts/omxtxex.fd
File: omxtxex.fd 2000/12/15 v3.1 1022 1022 File: omxtxex.fd 2000/12/15 v3.1
) 1023 1023 )
LaTeX Font Info: ... okay on input line 227. 1024 1024 LaTeX Font Info: ... okay on input line 227.
LaTeX Font Info: Checking defaults for U/txexa/m/n on input line 227. 1025 1025 LaTeX Font Info: Checking defaults for U/txexa/m/n on input line 227.
LaTeX Font Info: Trying to load font information for U+txexa on input line 2 1026 1026 LaTeX Font Info: Trying to load font information for U+txexa on input line 2
27. 1027 1027 27.
1028 1028
(/usr/local/texlive/2023/texmf-dist/tex/latex/txfonts/utxexa.fd 1029 1029 (/usr/local/texlive/2023/texmf-dist/tex/latex/txfonts/utxexa.fd
File: utxexa.fd 2000/12/15 v3.1 1030 1030 File: utxexa.fd 2000/12/15 v3.1
) 1031 1031 )
LaTeX Font Info: ... okay on input line 227. 1032 1032 LaTeX Font Info: ... okay on input line 227.
LaTeX Font Info: Checking defaults for PD1/pdf/m/n on input line 227. 1033 1033 LaTeX Font Info: Checking defaults for PD1/pdf/m/n on input line 227.
LaTeX Font Info: ... okay on input line 227. 1034 1034 LaTeX Font Info: ... okay on input line 227.
LaTeX Font Info: Checking defaults for PU/pdf/m/n on input line 227. 1035 1035 LaTeX Font Info: Checking defaults for PU/pdf/m/n on input line 227.
LaTeX Font Info: ... okay on input line 227. 1036 1036 LaTeX Font Info: ... okay on input line 227.
1037 1037
(/usr/local/texlive/2023/texmf-dist/tex/context/base/mkii/supp-pdf.mkii 1038 1038 (/usr/local/texlive/2023/texmf-dist/tex/context/base/mkii/supp-pdf.mkii
[Loading MPS to PDF converter (version 2006.09.02).] 1039 1039 [Loading MPS to PDF converter (version 2006.09.02).]
\scratchcounter=\count380 1040 1040 \scratchcounter=\count380
\scratchdimen=\dimen304 1041 1041 \scratchdimen=\dimen304
\scratchbox=\box122 1042 1042 \scratchbox=\box122
\nofMPsegments=\count381 1043 1043 \nofMPsegments=\count381
\nofMParguments=\count382 1044 1044 \nofMParguments=\count382
\everyMPshowfont=\toks49 1045 1045 \everyMPshowfont=\toks49
\MPscratchCnt=\count383 1046 1046 \MPscratchCnt=\count383
\MPscratchDim=\dimen305 1047 1047 \MPscratchDim=\dimen305
\MPnumerator=\count384 1048 1048 \MPnumerator=\count384
\makeMPintoPDFobject=\count385 1049 1049 \makeMPintoPDFobject=\count385
\everyMPtoPDFconversion=\toks50 1050 1050 \everyMPtoPDFconversion=\toks50
) (/usr/local/texlive/2023/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty 1051 1051 ) (/usr/local/texlive/2023/texmf-dist/tex/latex/epstopdf-pkg/epstopdf-base.sty
Package: epstopdf-base 2020-01-24 v2.11 Base part for package epstopdf 1052 1052 Package: epstopdf-base 2020-01-24 v2.11 Base part for package epstopdf
Package epstopdf-base Info: Redefining graphics rule for `.eps' on input line 4 1053 1053 Package epstopdf-base Info: Redefining graphics rule for `.eps' on input line 4
85. 1054 1054 85.
1055 1055
(/usr/local/texlive/2023/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg 1056 1056 (/usr/local/texlive/2023/texmf-dist/tex/latex/latexconfig/epstopdf-sys.cfg
File: epstopdf-sys.cfg 2010/07/13 v1.3 Configuration of (r)epstopdf for TeX Liv 1057 1057 File: epstopdf-sys.cfg 2010/07/13 v1.3 Configuration of (r)epstopdf for TeX Liv
e 1058 1058 e
)) 1059 1059 ))
LaTeX Info: Redefining \degres on input line 227. 1060 1060 LaTeX Info: Redefining \degres on input line 227.
LaTeX Info: Redefining \up on input line 227. 1061 1061 LaTeX Info: Redefining \up on input line 227.
Package caption Info: Begin \AtBeginDocument code. 1062 1062 Package caption Info: Begin \AtBeginDocument code.
Package caption Info: float package is loaded. 1063 1063 Package caption Info: float package is loaded.
Package caption Info: hyperref package is loaded. 1064 1064 Package caption Info: hyperref package is loaded.
Package caption Info: picinpar package is loaded. 1065 1065 Package caption Info: picinpar package is loaded.
Package caption Info: End \AtBeginDocument code. 1066 1066 Package caption Info: End \AtBeginDocument code.
1067 1067
*** Overriding the 'enumerate' environment. Pass option 'standardlists' for avo 1068 1068 *** Overriding the 'enumerate' environment. Pass option 'standardlists' for avo
iding this override. 1069 1069 iding this override.
*** Overriding the 'description' environment. Pass option 'standardlists' for a 1070 1070 *** Overriding the 'description' environment. Pass option 'standardlists' for a
voiding this override. ************ USE CUSTOM FRONT COVER 1071 1071 voiding this override. ************ USE CUSTOM FRONT COVER
Package hyperref Info: Link coloring OFF on input line 227. 1072 1072 Package hyperref Info: Link coloring OFF on input line 227.
(./main.out) 1073 1073 (./main.out)
(./main.out) 1074 1074 (./main.out)
\@outlinefile=\write3 1075 1075 \@outlinefile=\write3
\openout3 = `main.out'. 1076 1076 \openout3 = `main.out'.
1077 1077
1078 1078
*geometry* driver: auto-detecting 1079 1079 *geometry* driver: auto-detecting
*geometry* detected driver: pdftex 1080 1080 *geometry* detected driver: pdftex
*geometry* verbose mode - [ preamble ] result: 1081 1081 *geometry* verbose mode - [ preamble ] result:
* pass: disregarded the geometry package! 1082 1082 * pass: disregarded the geometry package!
* \paperwidth=598.14806pt 1083 1083 * \paperwidth=598.14806pt
* \paperheight=845.90042pt 1084 1084 * \paperheight=845.90042pt
* \textwidth=427.43153pt 1085 1085 * \textwidth=427.43153pt
* \textheight=671.71976pt 1086 1086 * \textheight=671.71976pt
* \oddsidemargin=99.58464pt 1087 1087 * \oddsidemargin=99.58464pt
* \evensidemargin=71.13188pt 1088 1088 * \evensidemargin=71.13188pt
* \topmargin=56.9055pt 1089 1089 * \topmargin=56.9055pt
* \headheight=12.0pt 1090 1090 * \headheight=12.0pt
* \headsep=31.29802pt 1091 1091 * \headsep=31.29802pt
* \topskip=11.0pt 1092 1092 * \topskip=11.0pt
* \footskip=31.29802pt 1093 1093 * \footskip=31.29802pt
* \marginparwidth=54.2025pt 1094 1094 * \marginparwidth=54.2025pt
* \marginparsep=7.0pt 1095 1095 * \marginparsep=7.0pt
* \columnsep=10.0pt 1096 1096 * \columnsep=10.0pt
* \skip\footins=10.0pt plus 4.0pt minus 2.0pt 1097 1097 * \skip\footins=10.0pt plus 4.0pt minus 2.0pt
* \hoffset=-72.26999pt 1098 1098 * \hoffset=-72.26999pt
* \voffset=-72.26999pt 1099 1099 * \voffset=-72.26999pt
* \mag=1000 1100 1100 * \mag=1000
* \@twocolumnfalse 1101 1101 * \@twocolumnfalse
* \@twosidetrue 1102 1102 * \@twosidetrue
* \@mparswitchtrue 1103 1103 * \@mparswitchtrue
* \@reversemarginfalse 1104 1104 * \@reversemarginfalse
* (1in=72.27pt=25.4mm, 1cm=28.453pt) 1105 1105 * (1in=72.27pt=25.4mm, 1cm=28.453pt)
1106 1106
*geometry* verbose mode - [ newgeometry ] result: 1107 1107 *geometry* verbose mode - [ newgeometry ] result:
* driver: pdftex 1108 1108 * driver: pdftex
* paper: a4paper 1109 1109 * paper: a4paper
* layout: <same size as paper> 1110 1110 * layout: <same size as paper>
* layoutoffset:(h,v)=(0.0pt,0.0pt) 1111 1111 * layoutoffset:(h,v)=(0.0pt,0.0pt)
* modes: twoside 1112 1112 * modes: twoside
* h-part:(L,W,R)=(170.71652pt, 355.65306pt, 71.77847pt) 1113 1113 * h-part:(L,W,R)=(170.71652pt, 355.65306pt, 71.77847pt)
* v-part:(T,H,B)=(101.50906pt, 741.54591pt, 2.84544pt) 1114 1114 * v-part:(T,H,B)=(101.50906pt, 741.54591pt, 2.84544pt)
* \paperwidth=598.14806pt 1115 1115 * \paperwidth=598.14806pt
* \paperheight=845.90042pt 1116 1116 * \paperheight=845.90042pt
* \textwidth=355.65306pt 1117 1117 * \textwidth=355.65306pt
* \textheight=741.54591pt 1118 1118 * \textheight=741.54591pt
* \oddsidemargin=98.44653pt 1119 1119 * \oddsidemargin=98.44653pt
* \evensidemargin=-0.49152pt 1120 1120 * \evensidemargin=-0.49152pt
* \topmargin=-14.05894pt 1121 1121 * \topmargin=-14.05894pt
* \headheight=12.0pt 1122 1122 * \headheight=12.0pt
* \headsep=31.29802pt 1123 1123 * \headsep=31.29802pt
* \topskip=11.0pt 1124 1124 * \topskip=11.0pt
* \footskip=31.29802pt 1125 1125 * \footskip=31.29802pt
* \marginparwidth=54.2025pt 1126 1126 * \marginparwidth=54.2025pt
* \marginparsep=7.0pt 1127 1127 * \marginparsep=7.0pt
* \columnsep=10.0pt 1128 1128 * \columnsep=10.0pt
* \skip\footins=10.0pt plus 4.0pt minus 2.0pt 1129 1129 * \skip\footins=10.0pt plus 4.0pt minus 2.0pt
* \hoffset=-72.26999pt 1130 1130 * \hoffset=-72.26999pt
* \voffset=-72.26999pt 1131 1131 * \voffset=-72.26999pt
* \mag=1000 1132 1132 * \mag=1000
* \@twocolumnfalse 1133 1133 * \@twocolumnfalse
* \@twosidetrue 1134 1134 * \@twosidetrue
* \@mparswitchtrue 1135 1135 * \@mparswitchtrue
* \@reversemarginfalse 1136 1136 * \@reversemarginfalse
* (1in=72.27pt=25.4mm, 1cm=28.453pt) 1137 1137 * (1in=72.27pt=25.4mm, 1cm=28.453pt)
1138 1138
<images_logos/image1_logoUBFC_grand.png, id=188, 156.6945pt x 74.898pt> 1139 1139 <images_logos/image1_logoUBFC_grand.png, id=188, 156.6945pt x 74.898pt>
File: images_logos/image1_logoUBFC_grand.png Graphic file (type png) 1140 1140 File: images_logos/image1_logoUBFC_grand.png Graphic file (type png)
<use images_logos/image1_logoUBFC_grand.png> 1141 1141 <use images_logos/image1_logoUBFC_grand.png>
Package pdftex.def Info: images_logos/image1_logoUBFC_grand.png used on input 1142 1142 Package pdftex.def Info: images_logos/image1_logoUBFC_grand.png used on input
line 233. 1143 1143 line 233.
(pdftex.def) Requested size: 142.25905pt x 68.00069pt. 1144 1144 (pdftex.def) Requested size: 142.25905pt x 68.00069pt.
<images_logos/logo_UFC_2018_transparence.png, id=190, 1160.335pt x 285.065pt> 1145 1145 <images_logos/logo_UFC_2018_transparence.png, id=190, 1160.335pt x 285.065pt>
File: images_logos/logo_UFC_2018_transparence.png Graphic file (type png) 1146 1146 File: images_logos/logo_UFC_2018_transparence.png Graphic file (type png)
<use images_logos/logo_UFC_2018_transparence.png> 1147 1147 <use images_logos/logo_UFC_2018_transparence.png>
Package pdftex.def Info: images_logos/logo_UFC_2018_transparence.png used on i 1148 1148 Package pdftex.def Info: images_logos/logo_UFC_2018_transparence.png used on i
nput line 233. 1149 1149 nput line 233.
(pdftex.def) Requested size: 142.25905pt x 34.94577pt. 1150 1150 (pdftex.def) Requested size: 142.25905pt x 34.94577pt.
LaTeX Font Info: Trying to load font information for OT1+txr on input line 2 1151 1151 LaTeX Font Info: Trying to load font information for OT1+txr on input line 2
43. 1152 1152 43.
(/usr/local/texlive/2023/texmf-dist/tex/latex/txfonts/ot1txr.fd 1153 1153 (/usr/local/texlive/2023/texmf-dist/tex/latex/txfonts/ot1txr.fd
File: ot1txr.fd 2000/12/15 v3.1 1154 1154 File: ot1txr.fd 2000/12/15 v3.1
) 1155 1155 )
LaTeX Font Info: Trying to load font information for U+txmia on input line 2 1156 1156 LaTeX Font Info: Trying to load font information for U+txmia on input line 2
43. 1157 1157 43.
1158 1158
(/usr/local/texlive/2023/texmf-dist/tex/latex/txfonts/utxmia.fd 1159 1159 (/usr/local/texlive/2023/texmf-dist/tex/latex/txfonts/utxmia.fd
File: utxmia.fd 2000/12/15 v3.1 1160 1160 File: utxmia.fd 2000/12/15 v3.1
) 1161 1161 )
LaTeX Font Info: Trying to load font information for U+txsya on input line 2 1162 1162 LaTeX Font Info: Trying to load font information for U+txsya on input line 2
43. 1163 1163 43.
1164 1164
(/usr/local/texlive/2023/texmf-dist/tex/latex/txfonts/utxsya.fd 1165 1165 (/usr/local/texlive/2023/texmf-dist/tex/latex/txfonts/utxsya.fd
File: utxsya.fd 2000/12/15 v3.1 1166 1166 File: utxsya.fd 2000/12/15 v3.1
) 1167 1167 )
LaTeX Font Info: Trying to load font information for U+txsyb on input line 2 1168 1168 LaTeX Font Info: Trying to load font information for U+txsyb on input line 2
43. 1169 1169 43.
1170 1170
(/usr/local/texlive/2023/texmf-dist/tex/latex/txfonts/utxsyb.fd 1171 1171 (/usr/local/texlive/2023/texmf-dist/tex/latex/txfonts/utxsyb.fd
File: utxsyb.fd 2000/12/15 v3.1 1172 1172 File: utxsyb.fd 2000/12/15 v3.1
) 1173 1173 )
LaTeX Font Info: Trying to load font information for U+txsyc on input line 2 1174 1174 LaTeX Font Info: Trying to load font information for U+txsyc on input line 2
43. 1175 1175 43.
1176 1176
(/usr/local/texlive/2023/texmf-dist/tex/latex/txfonts/utxsyc.fd 1177 1177 (/usr/local/texlive/2023/texmf-dist/tex/latex/txfonts/utxsyc.fd
File: utxsyc.fd 2000/12/15 v3.1 1178 1178 File: utxsyc.fd 2000/12/15 v3.1
) [1 1179 1179 ) [1
1180 1180
1181 1181
1182 1182
1183 1183
{/usr/local/texlive/2023/texmf-var/fonts/map/pdftex/updmap/pdftex.map}{/usr/loc 1184 1184 {/usr/local/texlive/2023/texmf-var/fonts/map/pdftex/updmap/pdftex.map}{/usr/loc
al/texlive/2023/texmf-dist/fonts/enc/dvips/base/8r.enc} <./images_logos/image1_ 1185 1185 al/texlive/2023/texmf-dist/fonts/enc/dvips/base/8r.enc} <./images_logos/image1_
logoUBFC_grand.png> <./images_logos/logo_UFC_2018_transparence.png>] [2 1186 1186 logoUBFC_grand.png> <./images_logos/logo_UFC_2018_transparence.png>] [2
1187 1187
1188 1188
] [3] [4] 1189 1189 ] [3] [4]
(./main.toc 1190 1190 (./main.toc
LaTeX Font Info: Font shape `T1/phv/m/it' in size <10.95> not available 1191 1191 LaTeX Font Info: Font shape `T1/phv/m/it' in size <10.95> not available
(Font) Font shape `T1/phv/m/sl' tried instead on input line 23. 1192 1192 (Font) Font shape `T1/phv/m/sl' tried instead on input line 23.
[5 1193 1193 [5
1194 1194
]) 1195 1195 ])
\tf@toc=\write4 1196 1196 \tf@toc=\write4
\openout4 = `main.toc'. 1197 1197 \openout4 = `main.toc'.
1198 1198
[6] [1 1199 1199 [6] [1
1200 1200
1201 1201
] [2] 1202 1202 ] [2]
Chapitre 1. 1203 1203 Chapitre 1.
Package lettrine.sty Info: Targeted height = 19.96736pt 1204 1204 Package lettrine.sty Info: Targeted height = 19.96736pt
(lettrine.sty) (for loversize=0, accent excluded), 1205 1205 (lettrine.sty) (for loversize=0, accent excluded),
(lettrine.sty) Lettrine height = 20.612pt (\uppercase {C}); 1206 1206 (lettrine.sty) Lettrine height = 20.612pt (\uppercase {C});
(lettrine.sty) reported on input line 337. 1207 1207 (lettrine.sty) reported on input line 337.
1208 1208
Overfull \hbox (6.79999pt too wide) in paragraph at lines 337--337 1209 1209 Overfull \hbox (6.79999pt too wide) in paragraph at lines 337--337
[][][][] 1210 1210 [][][][]
[] 1211 1211 []
1212 1212
1213 1213
Underfull \vbox (badness 10000) has occurred while \output is active [] 1214 1214 Underfull \vbox (badness 10000) has occurred while \output is active []
1215 1215
[3 1216 1216 [3
1217 1217
] 1218 1218 ]
[4] [5] 1219 1219 [4] [5]
\openout2 = `./chapters/contexte2.aux'. 1220 1220 \openout2 = `./chapters/contexte2.aux'.
1221 1221
(./chapters/contexte2.tex [6 1222 1222 (./chapters/contexte2.tex [6
1223 1223
1224 1224
] 1225 1225 ]
Chapitre 2. 1226 1226 Chapitre 2.
<./Figures/TLearning.png, id=302, 603.25375pt x 331.2375pt> 1227 1227 <./Figures/TLearning.png, id=302, 603.25375pt x 331.2375pt>
File: ./Figures/TLearning.png Graphic file (type png) 1228 1228 File: ./Figures/TLearning.png Graphic file (type png)
<use ./Figures/TLearning.png> 1229 1229 <use ./Figures/TLearning.png>
Package pdftex.def Info: ./Figures/TLearning.png used on input line 15. 1230 1230 Package pdftex.def Info: ./Figures/TLearning.png used on input line 15.
(pdftex.def) Requested size: 427.43153pt x 234.69505pt. 1231 1231 (pdftex.def) Requested size: 427.43153pt x 234.69505pt.
[7] 1232 1232 [7]
<./Figures/EIAH.png, id=312, 643.40375pt x 362.35374pt> 1233 1233 <./Figures/EIAH.png, id=312, 643.40375pt x 362.35374pt>
File: ./Figures/EIAH.png Graphic file (type png) 1234 1234 File: ./Figures/EIAH.png Graphic file (type png)
<use ./Figures/EIAH.png> 1235 1235 <use ./Figures/EIAH.png>
Package pdftex.def Info: ./Figures/EIAH.png used on input line 32. 1236 1236 Package pdftex.def Info: ./Figures/EIAH.png used on input line 32.
(pdftex.def) Requested size: 427.43153pt x 240.73pt. 1237 1237 (pdftex.def) Requested size: 427.43153pt x 240.73pt.
1238 1238
1239 1239
LaTeX Warning: `!h' float specifier changed to `!ht'. 1240 1240 LaTeX Warning: `!h' float specifier changed to `!ht'.
1241 1241
[8 <./Figures/TLearning.png>] [9 <./Figures/EIAH.png>] [10] 1242 1242 [8 <./Figures/TLearning.png>] [9 <./Figures/EIAH.png>] [10]
<./Figures/cycle.png, id=340, 668.4975pt x 665.48625pt> 1243 1243 <./Figures/cycle.png, id=340, 668.4975pt x 665.48625pt>
File: ./Figures/cycle.png Graphic file (type png) 1244 1244 File: ./Figures/cycle.png Graphic file (type png)
<use ./Figures/cycle.png> 1245 1245 <use ./Figures/cycle.png>
Package pdftex.def Info: ./Figures/cycle.png used on input line 83. 1246 1246 Package pdftex.def Info: ./Figures/cycle.png used on input line 83.
(pdftex.def) Requested size: 427.43153pt x 425.51372pt. 1247 1247 (pdftex.def) Requested size: 427.43153pt x 425.51372pt.
[11 <./Figures/cycle.png>] 1248 1248 [11 <./Figures/cycle.png>]
<./Figures/Reuse.png, id=363, 383.4325pt x 182.6825pt> 1249 1249 <./Figures/Reuse.png, id=363, 383.4325pt x 182.6825pt>
File: ./Figures/Reuse.png Graphic file (type png) 1250 1250 File: ./Figures/Reuse.png Graphic file (type png)
<use ./Figures/Reuse.png> 1251 1251 <use ./Figures/Reuse.png>
Package pdftex.def Info: ./Figures/Reuse.png used on input line 112. 1252 1252 Package pdftex.def Info: ./Figures/Reuse.png used on input line 112.
(pdftex.def) Requested size: 427.43153pt x 203.65802pt. 1253 1253 (pdftex.def) Requested size: 427.43153pt x 203.65802pt.
1254 1254
Underfull \hbox (badness 10000) in paragraph at lines 112--112 1255 1255 Underfull \hbox (badness 10000) in paragraph at lines 112--112
[]\T1/phv/m/sc/10.95 Figure 2.4 \T1/phv/m/n/10.95 ^^U |Prin-cipe de réuti-li-sa 1256 1256 []\T1/phv/m/sc/10.95 Figure 2.4 \T1/phv/m/n/10.95 ^^U |Prin-cipe de réuti-li-sa
-tion dans le RàPC (Tra-duit de 1257 1257 -tion dans le RàPC (Tra-duit de
[] 1258 1258 []
1259 1259
[12] [13 <./Figures/Reuse.png>] 1260 1260 [12] [13 <./Figures/Reuse.png>]
<./Figures/CycleCBR.png, id=384, 147.1899pt x 83.8332pt> 1261 1261 <./Figures/CycleCBR.png, id=384, 147.1899pt x 83.8332pt>
File: ./Figures/CycleCBR.png Graphic file (type png) 1262 1262 File: ./Figures/CycleCBR.png Graphic file (type png)
<use ./Figures/CycleCBR.png> 1263 1263 <use ./Figures/CycleCBR.png>
Package pdftex.def Info: ./Figures/CycleCBR.png used on input line 156. 1264 1264 Package pdftex.def Info: ./Figures/CycleCBR.png used on input line 156.
(pdftex.def) Requested size: 427.43153pt x 243.45026pt. 1265 1265 (pdftex.def) Requested size: 427.43153pt x 243.45026pt.
[14] [15 <./Figures/CycleCBR.png>] [16] 1266 1266 [14] [15 <./Figures/CycleCBR.png>] [16]
1267 1267
LaTeX Warning: Command \textperiodcentered invalid in math mode on input line 2 1268 1268 LaTeX Warning: Command \textperiodcentered invalid in math mode on input line 2
65. 1269 1269 65.
1270 1270
LaTeX Font Info: Trying to load font information for TS1+phv on input line 2 1271 1271 LaTeX Font Info: Trying to load font information for TS1+phv on input line 2
65. 1272 1272 65.
(/usr/local/texlive/2023/texmf-dist/tex/latex/psnfss/ts1phv.fd 1273 1273 (/usr/local/texlive/2023/texmf-dist/tex/latex/psnfss/ts1phv.fd
File: ts1phv.fd 2020/03/25 scalable font definitions for TS1/phv. 1274 1274 File: ts1phv.fd 2020/03/25 scalable font definitions for TS1/phv.
) 1275 1275 )
1276 1276
LaTeX Warning: Command \textperiodcentered invalid in math mode on input line 2 1277 1277 LaTeX Warning: Command \textperiodcentered invalid in math mode on input line 2
65. 1278 1278 65.
1279 1279
1280 1280
LaTeX Warning: Command \textperiodcentered invalid in math mode on input line 2 1281 1281 LaTeX Warning: Command \textperiodcentered invalid in math mode on input line 2
65. 1282 1282 65.
1283 1283
1284 1284
LaTeX Warning: Command \textperiodcentered invalid in math mode on input line 2 1285 1285 LaTeX Warning: Command \textperiodcentered invalid in math mode on input line 2
65. 1286 1286 65.
1287 1287
1288 1288
LaTeX Warning: Command \textperiodcentered invalid in math mode on input line 2 1289 1289 LaTeX Warning: Command \textperiodcentered invalid in math mode on input line 2
65. 1290 1290 65.
1291 1291
1292 1292
LaTeX Warning: Command \textperiodcentered invalid in math mode on input line 2 1293 1293 LaTeX Warning: Command \textperiodcentered invalid in math mode on input line 2
65. 1294 1294 65.
1295 1295
Missing character: There is no · in font txr! 1296 1296 Missing character: There is no · in font txr!
Missing character: There is no · in font txr! 1297 1297 Missing character: There is no · in font txr!
Missing character: There is no · in font txr! 1298 1298 Missing character: There is no · in font txr!
1299 1299
LaTeX Font Warning: Font shape `T1/phv/m/scit' undefined 1300 1300 LaTeX Font Warning: Font shape `T1/phv/m/scit' undefined
(Font) using `T1/phv/m/it' instead on input line 284. 1301 1301 (Font) using `T1/phv/m/it' instead on input line 284.
1302 1302
[17] [18] 1303 1303 [17] [18]
1304 1304
LaTeX Font Warning: Font shape `T1/phv/m/scit' undefined 1305 1305 LaTeX Font Warning: Font shape `T1/phv/m/scit' undefined
(Font) using `T1/phv/m/it' instead on input line 333. 1306 1306 (Font) using `T1/phv/m/it' instead on input line 333.
1307 1307
1308 1308
LaTeX Font Warning: Font shape `T1/phv/m/scit' undefined 1309 1309 LaTeX Font Warning: Font shape `T1/phv/m/scit' undefined
(Font) using `T1/phv/m/it' instead on input line 337. 1310 1310 (Font) using `T1/phv/m/it' instead on input line 337.
1311 1311
<./Figures/beta-distribution.png, id=459, 621.11293pt x 480.07928pt> 1312 1312 <./Figures/beta-distribution.png, id=459, 621.11293pt x 480.07928pt>
File: ./Figures/beta-distribution.png Graphic file (type png) 1313 1313 File: ./Figures/beta-distribution.png Graphic file (type png)
<use ./Figures/beta-distribution.png> 1314 1314 <use ./Figures/beta-distribution.png>
Package pdftex.def Info: ./Figures/beta-distribution.png used on input line 34 1315 1315 Package pdftex.def Info: ./Figures/beta-distribution.png used on input line 34
5. 1316 1316 5.
(pdftex.def) Requested size: 427.43153pt x 330.38333pt. 1317 1317 (pdftex.def) Requested size: 427.43153pt x 330.38333pt.
[19]) [20 <./Figures/beta-distribution.png>] [21 1318 1318 [19]) [20 <./Figures/beta-distribution.png>] [21
1319 1319
1320 1320
1321 1321
] [22] 1322 1322 ] [22]
\openout2 = `./chapters/EIAH.aux'. 1323 1323 \openout2 = `./chapters/EIAH.aux'.
1324 1324
(./chapters/EIAH.tex 1325 1325 (./chapters/EIAH.tex
Chapitre 3. 1326 1326 Chapitre 3.
[23 1327 1327 [23
1328 1328
1329 1329
] 1330 1330 ]
Underfull \hbox (badness 10000) in paragraph at lines 23--24 1331 1331 Underfull \hbox (badness 10000) in paragraph at lines 23--24
[]\T1/phv/m/n/10.95 Les tech-niques d'IA peuvent aussi ai-der à prendre des dé- 1332 1332 []\T1/phv/m/n/10.95 Les tech-niques d'IA peuvent aussi ai-der à prendre des dé-
ci-sions stra-té- 1333 1333 ci-sions stra-té-
[] 1334 1334 []
1335 1335
1336 1336
Underfull \hbox (badness 1874) in paragraph at lines 23--24 1337 1337 Underfull \hbox (badness 1874) in paragraph at lines 23--24
\T1/phv/m/n/10.95 giques vi-sant des ob-jec-tifs à longue échéance comme le mon 1338 1338 \T1/phv/m/n/10.95 giques vi-sant des ob-jec-tifs à longue échéance comme le mon
tre le tra-vail de 1339 1339 tre le tra-vail de
[] 1340 1340 []
1341 1341
<./Figures/architecture.png, id=497, 776.9025pt x 454.69875pt> 1342 1342 <./Figures/architecture.png, id=497, 776.9025pt x 454.69875pt>
File: ./Figures/architecture.png Graphic file (type png) 1343 1343 File: ./Figures/architecture.png Graphic file (type png)
<use ./Figures/architecture.png> 1344 1344 <use ./Figures/architecture.png>
Package pdftex.def Info: ./Figures/architecture.png used on input line 37. 1345 1345 Package pdftex.def Info: ./Figures/architecture.png used on input line 37.
(pdftex.def) Requested size: 427.43153pt x 250.16833pt. 1346 1346 (pdftex.def) Requested size: 427.43153pt x 250.16833pt.
1347 1347
LaTeX Warning: Reference `sectBanditManchot' on page 24 undefined on input line 1348 1348 LaTeX Warning: Reference `sectBanditManchot' on page 24 undefined on input line
42. 1349 1349 42.
1350 1350
[24] 1351 1351 [24]
Underfull \vbox (badness 10000) has occurred while \output is active [] 1352 1352 Underfull \vbox (badness 10000) has occurred while \output is active []
1353 1353
[25 <./Figures/architecture.png>] 1354 1354 [25 <./Figures/architecture.png>]
<./Figures/ELearningLevels.png, id=526, 602.25pt x 612.78937pt> 1355 1355 <./Figures/ELearningLevels.png, id=526, 602.25pt x 612.78937pt>
File: ./Figures/ELearningLevels.png Graphic file (type png) 1356 1356 File: ./Figures/ELearningLevels.png Graphic file (type png)
<use ./Figures/ELearningLevels.png> 1357 1357 <use ./Figures/ELearningLevels.png>
Package pdftex.def Info: ./Figures/ELearningLevels.png used on input line 61. 1358 1358 Package pdftex.def Info: ./Figures/ELearningLevels.png used on input line 61.
(pdftex.def) Requested size: 427.43153pt x 434.92455pt. 1359 1359 (pdftex.def) Requested size: 427.43153pt x 434.92455pt.
1360 1360
Underfull \hbox (badness 3690) in paragraph at lines 61--61 1361 1361 Underfull \hbox (badness 3690) in paragraph at lines 61--61
[]\T1/phv/m/sc/10.95 Figure 3.2 \T1/phv/m/n/10.95 ^^U |Tra-duc-tion des ni-veau 1362 1362 []\T1/phv/m/sc/10.95 Figure 3.2 \T1/phv/m/n/10.95 ^^U |Tra-duc-tion des ni-veau
x du sys-tème de re-com-man-da-tion dans 1363 1363 x du sys-tème de re-com-man-da-tion dans
[] 1364 1364 []
1365 1365
1366 1366
Underfull \hbox (badness 10000) in paragraph at lines 70--74 1367 1367 Underfull \hbox (badness 10000) in paragraph at lines 70--74
[][][][][][] 1368 1368 [][][][][][]
[] 1369 1369 []
1370 1370
1371 1371
Underfull \hbox (badness 10000) in paragraph at lines 70--74 1372 1372 Underfull \hbox (badness 10000) in paragraph at lines 70--74
[][][][][][] 1373 1373 [][][][][][]
[] 1374 1374 []
1375 1375
1376 1376
Underfull \vbox (badness 10000) has occurred while \output is active [] 1377 1377 Underfull \vbox (badness 10000) has occurred while \output is active []
1378 1378
[26] 1379 1379 [26]
Overfull \hbox (2.56369pt too wide) in paragraph at lines 84--84 1380 1380 Overfull \hbox (2.56369pt too wide) in paragraph at lines 84--84
[]|\T1/phv/m/n/9 [[]]| 1381 1381 []|\T1/phv/m/n/9 [[]]|
[] 1382 1382 []
1383 1383
1384 1384
Overfull \hbox (0.5975pt too wide) in paragraph at lines 79--95 1385 1385 Overfull \hbox (0.5975pt too wide) in paragraph at lines 79--95
[][] 1386 1386 [][]
[] 1387 1387 []
1388 1388
) [27 <./Figures/ELearningLevels.png>] [28] 1389 1389 ) [27 <./Figures/ELearningLevels.png>] [28]
\openout2 = `./chapters/CBR.aux'. 1390 1390 \openout2 = `./chapters/CBR.aux'.
1391 1391
(./chapters/CBR.tex 1392 1392 (./chapters/CBR.tex
Chapitre 4. 1393 1393 Chapitre 4.
1394 1394
Underfull \vbox (badness 4967) has occurred while \output is active [] 1395 1395 Underfull \vbox (badness 4967) has occurred while \output is active []
1396 1396
[29 1397 1397 [29
1398 1398
1399 1399
1400 1400
1401 1401
] 1402 1402 ]
Underfull \vbox (badness 10000) has occurred while \output is active [] 1403 1403 Underfull \vbox (badness 10000) has occurred while \output is active []
1404 1404
[30] 1405 1405 [30]
Underfull \vbox (badness 10000) has occurred while \output is active [] 1406 1406 Underfull \vbox (badness 10000) has occurred while \output is active []
1407 1407
[31] 1408 1408 [31]
Underfull \vbox (badness 10000) has occurred while \output is active [] 1409 1409 Underfull \vbox (badness 10000) has occurred while \output is active []
1410 1410
[32] 1411 1411 [32]
Underfull \hbox (badness 10000) in paragraph at lines 61--63 1412 1412 Underfull \hbox (badness 10000) in paragraph at lines 61--63
1413 1413
[] 1414 1414 []
1415 1415
1416 1416
Underfull \hbox (badness 10000) in paragraph at lines 64--65 1417 1417 Underfull \hbox (badness 10000) in paragraph at lines 64--65
1418 1418
[] 1419 1419 []
1420 1420
<./Figures/ModCBR1.png, id=620, 942.52126pt x 624.83438pt> 1421 1421 <./Figures/ModCBR1.png, id=622, 942.52126pt x 624.83438pt>
File: ./Figures/ModCBR1.png Graphic file (type png) 1422 1422 File: ./Figures/ModCBR1.png Graphic file (type png)
<use ./Figures/ModCBR1.png> 1423 1423 <use ./Figures/ModCBR1.png>
Package pdftex.def Info: ./Figures/ModCBR1.png used on input line 66. 1424 1424 Package pdftex.def Info: ./Figures/ModCBR1.png used on input line 66.
(pdftex.def) Requested size: 427.43153pt x 283.36574pt. 1425 1425 (pdftex.def) Requested size: 427.43153pt x 283.36574pt.
<./Figures/taxonomieEIAH.png, id=623, 984.67876pt x 614.295pt> 1426 1426 <./Figures/taxonomieEIAH.png, id=625, 984.67876pt x 614.295pt>
File: ./Figures/taxonomieEIAH.png Graphic file (type png) 1427 1427 File: ./Figures/taxonomieEIAH.png Graphic file (type png)
<use ./Figures/taxonomieEIAH.png> 1428 1428 <use ./Figures/taxonomieEIAH.png>
Package pdftex.def Info: ./Figures/taxonomieEIAH.png used on input line 72. 1429 1429 Package pdftex.def Info: ./Figures/taxonomieEIAH.png used on input line 72.
(pdftex.def) Requested size: 427.43153pt x 266.65376pt. 1430 1430 (pdftex.def) Requested size: 427.43153pt x 266.65376pt.
<./Figures/ModCBR2.png, id=626, 1145.27875pt x 545.03625pt> 1431 1431 <./Figures/ModCBR2.png, id=628, 1145.27875pt x 545.03625pt>
File: ./Figures/ModCBR2.png Graphic file (type png) 1432 1432 File: ./Figures/ModCBR2.png Graphic file (type png)
<use ./Figures/ModCBR2.png> 1433 1433 <use ./Figures/ModCBR2.png>
Package pdftex.def Info: ./Figures/ModCBR2.png used on input line 81. 1434 1434 Package pdftex.def Info: ./Figures/ModCBR2.png used on input line 81.
(pdftex.def) Requested size: 427.43153pt x 203.41505pt. 1435 1435 (pdftex.def) Requested size: 427.43153pt x 203.41505pt.
1436 1436
Underfull \vbox (badness 10000) has occurred while \output is active [] 1437 1437 Underfull \vbox (badness 10000) has occurred while \output is active []
1438 1438
[33] 1439 1439 [33]
1440 1440
LaTeX Warning: Text page 34 contains only floats. 1441 1441 LaTeX Warning: Text page 34 contains only floats.
1442 1442
[34 <./Figures/ModCBR1.png> <./Figures/taxonomieEIAH.png>] 1443 1443 [34 <./Figures/ModCBR1.png> <./Figures/taxonomieEIAH.png>]
Overfull \hbox (7.88272pt too wide) in paragraph at lines 93--93 1444 1444 Overfull \hbox (7.88272pt too wide) in paragraph at lines 93--93
[]|\T1/phv/m/n/9 [[]]| 1445 1445 []|\T1/phv/m/n/9 [[]]|
[] 1446 1446 []
1447 1447
1448 1448
Overfull \hbox (6.9288pt too wide) in paragraph at lines 109--109 1449 1449 Overfull \hbox (6.9288pt too wide) in paragraph at lines 109--109
[]|\T1/phv/m/n/9 [[]]| 1450 1450 []|\T1/phv/m/n/9 [[]]|
[] 1451 1451 []
1452 1452
) [35 <./Figures/ModCBR2.png>] [36] [37 1453 1453 ) [35 <./Figures/ModCBR2.png>] [36] [37
1454 1454
1455 1455
1456 1456
] [38] 1457 1457 ] [38]
\openout2 = `./chapters/Architecture.aux'. 1458 1458 \openout2 = `./chapters/Architecture.aux'.
1459 1459
(./chapters/Architecture.tex 1460 1460 (./chapters/Architecture.tex
Chapitre 5. 1461 1461 Chapitre 5.
1462 1462
Underfull \vbox (badness 10000) has occurred while \output is active [] 1463 1463 Underfull \vbox (badness 10000) has occurred while \output is active []
1464 1464
[39 1465 1465 [39
1466 1466
1467 1467
] 1468 1468 ]
<./Figures/AIVT.png, id=692, 1116.17pt x 512.91624pt> 1469 1469 <./Figures/AIVT.png, id=696, 1116.17pt x 512.91624pt>
File: ./Figures/AIVT.png Graphic file (type png) 1470 1470 File: ./Figures/AIVT.png Graphic file (type png)
<use ./Figures/AIVT.png> 1471 1471 <use ./Figures/AIVT.png>
Package pdftex.def Info: ./Figures/AIVT.png used on input line 21. 1472 1472 Package pdftex.def Info: ./Figures/AIVT.png used on input line 21.
(pdftex.def) Requested size: 427.43153pt x 196.41287pt. 1473 1473 (pdftex.def) Requested size: 427.43153pt x 196.41287pt.
1474 1474
Underfull \hbox (badness 3049) in paragraph at lines 38--39 1475 1475 Underfull \hbox (badness 3049) in paragraph at lines 38--39
[]|\T1/phv/m/n/10.95 Discipline des in-for-ma-tions conte- 1476 1476 []|\T1/phv/m/n/10.95 Discipline des in-for-ma-tions conte-
[] 1477 1477 []
1478 1478
1479 1479
Underfull \hbox (badness 2435) in paragraph at lines 40--40 1480 1480 Underfull \hbox (badness 2435) in paragraph at lines 40--40
[]|\T1/phv/m/n/10.95 Le ni-veau sco-laire de la ma-tière 1481 1481 []|\T1/phv/m/n/10.95 Le ni-veau sco-laire de la ma-tière
[] 1482 1482 []
1483 1483
1484 1484
Underfull \hbox (badness 7468) in paragraph at lines 41--42 1485 1485 Underfull \hbox (badness 7468) in paragraph at lines 41--42
[]|\T1/phv/m/n/10.95 Professeur, Ad-mi-nis- 1486 1486 []|\T1/phv/m/n/10.95 Professeur, Ad-mi-nis-
[] 1487 1487 []
1488 1488
1489 1489
Underfull \hbox (badness 7468) in paragraph at lines 42--43 1490 1490 Underfull \hbox (badness 7468) in paragraph at lines 42--43
[]|\T1/phv/m/n/10.95 Professeur, Ad-mi-nis- 1491 1491 []|\T1/phv/m/n/10.95 Professeur, Ad-mi-nis-
[] 1492 1492 []
1493 1493
1494 1494
Underfull \hbox (badness 5050) in paragraph at lines 46--46 1495 1495 Underfull \hbox (badness 5050) in paragraph at lines 46--46
[]|\T1/phv/m/n/10.95 Le type d'in-for-ma-tions conte-nues 1496 1496 []|\T1/phv/m/n/10.95 Le type d'in-for-ma-tions conte-nues
[] 1497 1497 []
1498 1498
1499 1499
Underfull \hbox (badness 10000) in paragraph at lines 48--49 1500 1500 Underfull \hbox (badness 10000) in paragraph at lines 48--49
[]|\T1/phv/m/n/10.95 Connaissances et 1501 1501 []|\T1/phv/m/n/10.95 Connaissances et
[] 1502 1502 []

No preview for this file type

main.synctex.gz View file @ 399d77d

No preview for this file type