<?xml version="1.0"?>
<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en">
	<id>https://marovi.ai/index.php?action=history&amp;feed=atom&amp;title=Module%3AGlossary%2Fdata</id>
	<title>Module:Glossary/data - Revision history</title>
	<link rel="self" type="application/atom+xml" href="https://marovi.ai/index.php?action=history&amp;feed=atom&amp;title=Module%3AGlossary%2Fdata"/>
	<link rel="alternate" type="text/html" href="https://marovi.ai/index.php?title=Module:Glossary/data&amp;action=history"/>
	<updated>2026-04-27T19:26:32Z</updated>
	<subtitle>Revision history for this page on the wiki</subtitle>
	<generator>MediaWiki 1.39.1</generator>
	<entry>
		<id>https://marovi.ai/index.php?title=Module:Glossary/data&amp;diff=2175&amp;oldid=prev</id>
		<title>DeployBot: [deploy-bot] Deploy Glossary data module (v1.3.0)</title>
		<link rel="alternate" type="text/html" href="https://marovi.ai/index.php?title=Module:Glossary/data&amp;diff=2175&amp;oldid=prev"/>
		<updated>2026-04-26T23:37:22Z</updated>

		<summary type="html">&lt;p&gt;[deploy-bot] Deploy Glossary data module (v1.3.0)&lt;/p&gt;
&lt;p&gt;&lt;b&gt;New page&lt;/b&gt;&lt;/p&gt;&lt;div&gt;local data = {}&lt;br /&gt;
&lt;br /&gt;
data[&amp;quot;stochastic gradient descent&amp;quot;] = {&lt;br /&gt;
    short = &amp;quot;An iterative optimization algorithm that estimates gradients from random samples rather than the full dataset, enabling efficient training on large datasets.&amp;quot;,&lt;br /&gt;
    article = &amp;quot;Stochastic Gradient Descent&amp;quot;,&lt;br /&gt;
    aliases = { &amp;quot;SGD&amp;quot; },&lt;br /&gt;
    es = &amp;quot;Algoritmo de optimización iterativa que estima gradientes a partir de muestras aleatorias en lugar de todo el conjunto de datos.&amp;quot;,&lt;br /&gt;
    zh = &amp;quot;一种通过随机样本而非完整数据集来估计梯度的迭代优化算法，能够在大规模数据集上高效训练。&amp;quot;,&lt;br /&gt;
}&lt;br /&gt;
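&lt;br /&gt;
-- Illustrative sketch (not used by the template): one SGD step over a&lt;br /&gt;
-- single randomly drawn sample, assuming a hypothetical grad_fn(params,&lt;br /&gt;
-- sample) that returns a per-parameter gradient table.&lt;br /&gt;
local function sgd_step(params, samples, grad_fn, lr)&lt;br /&gt;
    local sample = samples[math.random(#samples)]  -- one random sample&lt;br /&gt;
    local grad = grad_fn(params, sample)           -- noisy gradient estimate&lt;br /&gt;
    for i = 1, #params do&lt;br /&gt;
        params[i] = params[i] - lr * grad[i]       -- move against the gradient&lt;br /&gt;
    end&lt;br /&gt;
    return params&lt;br /&gt;
end&lt;br /&gt;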
&lt;br /&gt;
data[&amp;quot;gradient descent&amp;quot;] = {&lt;br /&gt;
    short = &amp;quot;An optimization algorithm that iteratively moves parameters in the direction of steepest decrease of a function.&amp;quot;,&lt;br /&gt;
    article = &amp;quot;Gradient descent&amp;quot;,&lt;br /&gt;
    aliases = {},&lt;br /&gt;
    es = &amp;quot;Algoritmo de optimización que mueve iterativamente los parámetros en la dirección de mayor descenso de una función.&amp;quot;,&lt;br /&gt;
    zh = &amp;quot;一种优化算法，沿函数最陡下降方向迭代调整参数。&amp;quot;,&lt;br /&gt;
}&lt;br /&gt;
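&lt;br /&gt;
-- Illustrative sketch: full-batch gradient descent on f(x) = x^2, whose&lt;br /&gt;
-- gradient is 2x; gd(10, 0.1, 100) approaches the minimum at x = 0.&lt;br /&gt;
local function gd(x, lr, steps)&lt;br /&gt;
    for _ = 1, steps do&lt;br /&gt;
        x = x - lr * (2 * x)  -- step in the direction of steepest decrease&lt;br /&gt;
    end&lt;br /&gt;
    return x&lt;br /&gt;
end&lt;br /&gt;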
&lt;br /&gt;
data[&amp;quot;learning rate&amp;quot;] = {&lt;br /&gt;
    short = &amp;quot;The step-size parameter controlling how much model parameters change per gradient update. Too large a value causes divergence; too small a value slows convergence.&amp;quot;,&lt;br /&gt;
    article = &amp;quot;Learning rate&amp;quot;,&lt;br /&gt;
    aliases = { &amp;quot;step size&amp;quot; },&lt;br /&gt;
    es = &amp;quot;Parámetro de tamaño de paso que controla cuánto cambian los parámetros del modelo por actualización de gradiente.&amp;quot;,&lt;br /&gt;
    zh = &amp;quot;控制每次梯度更新中模型参数变化幅度的步长参数。过大会导致发散，过小会导致收敛缓慢。&amp;quot;,&lt;br /&gt;
}&lt;br /&gt;
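&lt;br /&gt;
-- Illustrative sketch: how step size drives divergence on f(x) = x^2&lt;br /&gt;
-- (gradient 2x). Each step multiplies x by (1 - 2 * lr), so iterates&lt;br /&gt;
-- shrink toward the minimum only when 0 &amp;lt; lr &amp;lt; 1; at lr = 1.1 the&lt;br /&gt;
-- factor is -1.2 and |x| grows every step.&lt;br /&gt;
local function lr_step(x, lr)&lt;br /&gt;
    return x - lr * (2 * x)  -- one gradient-descent step on x^2&lt;br /&gt;
end&lt;br /&gt;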
&lt;br /&gt;
data[&amp;quot;mini-batch&amp;quot;] = {&lt;br /&gt;
    short = &amp;quot;A small subset of training data used to compute a gradient estimate. Balances the noise of single-sample SGD with the cost of full-batch gradient descent.&amp;quot;,&lt;br /&gt;
    article = nil,&lt;br /&gt;
    aliases = { &amp;quot;minibatch&amp;quot;, &amp;quot;mini batch&amp;quot; },&lt;br /&gt;
    es = &amp;quot;Un subconjunto pequeño de datos de entrenamiento utilizado para calcular una estimación del gradiente.&amp;quot;,&lt;br /&gt;
    zh = &amp;quot;用于计算梯度估计的训练数据小子集。在单样本SGD的噪声与全批量梯度下降的成本之间取得平衡。&amp;quot;,&lt;br /&gt;
}&lt;br /&gt;
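&lt;br /&gt;
-- Illustrative sketch: draw a mini-batch of size b (sampled with&lt;br /&gt;
-- replacement to keep the sketch short; practice usually shuffles the&lt;br /&gt;
-- dataset and slices it). Averaging gradients over the batch trades&lt;br /&gt;
-- noise against compute.&lt;br /&gt;
local function draw_minibatch(samples, b)&lt;br /&gt;
    local batch = {}&lt;br /&gt;
    for i = 1, b do&lt;br /&gt;
        batch[i] = samples[math.random(#samples)]&lt;br /&gt;
    end&lt;br /&gt;
    return batch&lt;br /&gt;
end&lt;br /&gt;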
&lt;br /&gt;
data[&amp;quot;backpropagation&amp;quot;] = {&lt;br /&gt;
    short = &amp;quot;An algorithm for computing gradients of a loss function with respect to network weights by applying the chain rule layer by layer from output to input.&amp;quot;,&lt;br /&gt;
    article = &amp;quot;Backpropagation&amp;quot;,&lt;br /&gt;
    aliases = { &amp;quot;backprop&amp;quot; },&lt;br /&gt;
    es = &amp;quot;Algoritmo para calcular gradientes de una función de pérdida respecto a los pesos de la red, aplicando la regla de la cadena capa por capa.&amp;quot;,&lt;br /&gt;
    zh = &amp;quot;一种通过逐层应用链式法则从输出到输入计算损失函数关于网络权重梯度的算法。&amp;quot;,&lt;br /&gt;
}&lt;br /&gt;
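&lt;br /&gt;
-- Illustrative sketch: reverse-mode chain rule through a stack of scalar&lt;br /&gt;
-- layers, assuming each layer cached its local derivatives during the&lt;br /&gt;
-- forward pass (dlocal_w w.r.t. its weight, dlocal_x w.r.t. its input).&lt;br /&gt;
-- The field names are hypothetical, for illustration only.&lt;br /&gt;
local function backprop(layers, dloss)&lt;br /&gt;
    local grad = dloss                                -- gradient at the output&lt;br /&gt;
    for i = #layers, 1, -1 do                         -- output back to input&lt;br /&gt;
        layers[i].dweight = grad * layers[i].dlocal_w -- weight gradient&lt;br /&gt;
        grad = grad * layers[i].dlocal_x              -- chain rule upstream&lt;br /&gt;
    end&lt;br /&gt;
    return grad&lt;br /&gt;
end&lt;br /&gt;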
&lt;br /&gt;
data[&amp;quot;loss function&amp;quot;] = {&lt;br /&gt;
    short = &amp;quot;A function that measures how far a model&amp;#039;s predictions are from the true values. Training minimizes this function.&amp;quot;,&lt;br /&gt;
    article = nil,&lt;br /&gt;
    aliases = { &amp;quot;objective function&amp;quot;, &amp;quot;cost function&amp;quot; },&lt;br /&gt;
    es = &amp;quot;Función que mide cuán lejos están las predicciones del modelo de los valores reales. El entrenamiento minimiza esta función.&amp;quot;,&lt;br /&gt;
    zh = &amp;quot;衡量模型预测值与真实值之间差距的函数。训练过程即是最小化此函数。&amp;quot;,&lt;br /&gt;
}&lt;br /&gt;
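&lt;br /&gt;
-- Illustrative sketch: mean squared error, one common loss function;&lt;br /&gt;
-- training adjusts the model so that this value decreases.&lt;br /&gt;
local function mse(preds, targets)&lt;br /&gt;
    local sum = 0&lt;br /&gt;
    for i = 1, #preds do&lt;br /&gt;
        local diff = preds[i] - targets[i]&lt;br /&gt;
        sum = sum + diff * diff&lt;br /&gt;
    end&lt;br /&gt;
    return sum / #preds&lt;br /&gt;
end&lt;br /&gt;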
&lt;br /&gt;
data[&amp;quot;convergence&amp;quot;] = {&lt;br /&gt;
    short = &amp;quot;The property that an optimization algorithm&amp;#039;s iterates approach a solution (a minimum) as the number of iterations increases.&amp;quot;,&lt;br /&gt;
    article = nil,&lt;br /&gt;
    aliases = {},&lt;br /&gt;
    es = &amp;quot;La propiedad de un algoritmo de optimización de aproximarse a una solución (mínimo) a medida que aumentan las iteraciones.&amp;quot;,&lt;br /&gt;
    zh = &amp;quot;优化算法随迭代次数增加而逐渐接近解（最小值）的性质。&amp;quot;,&lt;br /&gt;
}&lt;br /&gt;
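&lt;br /&gt;
-- Illustrative sketch: a simple stopping rule that declares convergence&lt;br /&gt;
-- once successive iterates stop moving; tol is a hypothetical tolerance.&lt;br /&gt;
local function has_converged(x_prev, x_curr, tol)&lt;br /&gt;
    return math.abs(x_curr - x_prev) &amp;lt; tol&lt;br /&gt;
end&lt;br /&gt;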
&lt;br /&gt;
data[&amp;quot;momentum&amp;quot;] = {&lt;br /&gt;
    short = &amp;quot;A technique that accelerates SGD by accumulating an exponentially decaying moving average of past gradients, helping traverse flat regions and dampen oscillations.&amp;quot;,&lt;br /&gt;
    article = nil,&lt;br /&gt;
    aliases = {},&lt;br /&gt;
    es = &amp;quot;Técnica que acelera el SGD acumulando un promedio móvil con decaimiento exponencial de gradientes pasados.&amp;quot;,&lt;br /&gt;
    zh = &amp;quot;一种通过累积过去梯度的指数衰减移动平均来加速SGD的技术，有助于穿越平坦区域并抑制振荡。&amp;quot;,&lt;br /&gt;
}&lt;br /&gt;
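&lt;br /&gt;
-- Illustrative sketch: momentum with decay beta (commonly 0.9); v holds&lt;br /&gt;
-- the exponentially decaying moving average of past gradients.&lt;br /&gt;
local function momentum_step(params, v, grad, lr, beta)&lt;br /&gt;
    for i = 1, #params do&lt;br /&gt;
        v[i] = beta * v[i] + (1 - beta) * grad[i]  -- decaying average&lt;br /&gt;
        params[i] = params[i] - lr * v[i]          -- step along the velocity&lt;br /&gt;
    end&lt;br /&gt;
    return params, v&lt;br /&gt;
end&lt;br /&gt;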
&lt;br /&gt;
data[&amp;quot;adam&amp;quot;] = {&lt;br /&gt;
    short = &amp;quot;An adaptive learning rate optimizer combining momentum with per-parameter rate scaling. One of the most widely used optimizers in deep learning.&amp;quot;,&lt;br /&gt;
    article = &amp;quot;Adam (optimiser)&amp;quot;,&lt;br /&gt;
    aliases = { &amp;quot;Adam optimizer&amp;quot;, &amp;quot;Adam optimiser&amp;quot; },&lt;br /&gt;
    es = &amp;quot;Optimizador con tasa de aprendizaje adaptativa que combina momentum con escalado de tasa por parámetro.&amp;quot;,&lt;br /&gt;
    zh = &amp;quot;一种自适应学习率优化器，结合动量与逐参数速率缩放。深度学习中使用最广泛的优化器之一。&amp;quot;,&lt;br /&gt;
}&lt;br /&gt;
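&lt;br /&gt;
-- Illustrative sketch of the Adam update (bias correction omitted for&lt;br /&gt;
-- brevity): m tracks the gradient mean, s the squared-gradient mean,&lt;br /&gt;
-- and each parameter gets its own effective step size.&lt;br /&gt;
local function adam_step(p, m, s, grad, lr, b1, b2, eps)&lt;br /&gt;
    for i = 1, #p do&lt;br /&gt;
        m[i] = b1 * m[i] + (1 - b1) * grad[i]            -- momentum term&lt;br /&gt;
        s[i] = b2 * s[i] + (1 - b2) * grad[i] * grad[i]  -- squared average&lt;br /&gt;
        p[i] = p[i] - lr * m[i] / (math.sqrt(s[i]) + eps)&lt;br /&gt;
    end&lt;br /&gt;
    return p, m, s&lt;br /&gt;
end&lt;br /&gt;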
&lt;br /&gt;
data[&amp;quot;gradient clipping&amp;quot;] = {&lt;br /&gt;
    short = &amp;quot;A technique that caps the gradient norm before each update to prevent exploding gradients, especially in recurrent networks.&amp;quot;,&lt;br /&gt;
    article = nil,&lt;br /&gt;
    aliases = {},&lt;br /&gt;
    es = &amp;quot;Técnica que limita la norma del gradiente antes de cada actualización para prevenir gradientes explosivos.&amp;quot;,&lt;br /&gt;
    zh = &amp;quot;一种在每次更新前限制梯度范数以防止梯度爆炸的技术，尤其适用于循环网络。&amp;quot;,&lt;br /&gt;
}&lt;br /&gt;
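&lt;br /&gt;
-- Illustrative sketch: rescale the gradient when its L2 norm exceeds a&lt;br /&gt;
-- cap c, keeping the update direction but bounding its size.&lt;br /&gt;
local function clip_by_norm(grad, c)&lt;br /&gt;
    local sq = 0&lt;br /&gt;
    for i = 1, #grad do sq = sq + grad[i] * grad[i] end&lt;br /&gt;
    local norm = math.sqrt(sq)&lt;br /&gt;
    if norm &amp;gt; c then&lt;br /&gt;
        for i = 1, #grad do grad[i] = grad[i] * (c / norm) end&lt;br /&gt;
    end&lt;br /&gt;
    return grad&lt;br /&gt;
end&lt;br /&gt;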
&lt;br /&gt;
data[&amp;quot;batch normalization&amp;quot;] = {&lt;br /&gt;
    short = &amp;quot;A technique that normalizes layer inputs across a mini-batch, reducing internal covariate shift and allowing higher learning rates.&amp;quot;,&lt;br /&gt;
    article = nil,&lt;br /&gt;
    aliases = { &amp;quot;batch normalisation&amp;quot;, &amp;quot;batchnorm&amp;quot; },&lt;br /&gt;
    es = &amp;quot;Técnica que normaliza las entradas de cada capa a lo largo de un mini-lote, reduciendo el desplazamiento covariante interno.&amp;quot;,&lt;br /&gt;
    zh = &amp;quot;一种在小批量上对层输入进行归一化的技术，减少内部协变量偏移，允许使用更高的学习率。&amp;quot;,&lt;br /&gt;
}&lt;br /&gt;
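&lt;br /&gt;
-- Illustrative sketch: normalize one feature across a mini-batch to zero&lt;br /&gt;
-- mean and unit variance; the learnable scale and shift that usually&lt;br /&gt;
-- follow are omitted for brevity.&lt;br /&gt;
local function batch_norm(xs, eps)&lt;br /&gt;
    local mean, var = 0, 0&lt;br /&gt;
    for i = 1, #xs do mean = mean + xs[i] end&lt;br /&gt;
    mean = mean / #xs&lt;br /&gt;
    for i = 1, #xs do var = var + (xs[i] - mean) ^ 2 end&lt;br /&gt;
    var = var / #xs&lt;br /&gt;
    for i = 1, #xs do xs[i] = (xs[i] - mean) / math.sqrt(var + eps) end&lt;br /&gt;
    return xs&lt;br /&gt;
end&lt;br /&gt;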
&lt;br /&gt;
data[&amp;quot;convex optimization&amp;quot;] = {&lt;br /&gt;
    short = &amp;quot;The study of minimizing convex functions over convex sets, where any local minimum is also a global minimum.&amp;quot;,&lt;br /&gt;
    article = &amp;quot;Convex optimisation&amp;quot;,&lt;br /&gt;
    aliases = { &amp;quot;convex optimisation&amp;quot; },&lt;br /&gt;
    es = &amp;quot;El estudio de la minimización de funciones convexas sobre conjuntos convexos, donde todo mínimo local es también global.&amp;quot;,&lt;br /&gt;
    zh = &amp;quot;研究在凸集上最小化凸函数的学科，其中任何局部最小值也是全局最小值。&amp;quot;,&lt;br /&gt;
}&lt;br /&gt;
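&lt;br /&gt;
-- Illustrative sketch: a midpoint convexity spot-check for a scalar&lt;br /&gt;
-- function f; convex f satisfies f((a + b) / 2) &amp;lt;= (f(a) + f(b)) / 2.&lt;br /&gt;
-- This is why convex problems have no non-global local minima.&lt;br /&gt;
local function midpoint_convex(f, a, b)&lt;br /&gt;
    return f((a + b) / 2) &amp;lt;= (f(a) + f(b)) / 2&lt;br /&gt;
end&lt;br /&gt;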
&lt;br /&gt;
return data&lt;/div&gt;</summary>
		<author><name>DeployBot</name></author>
	</entry>
</feed>