-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathindex.html
More file actions
947 lines (555 loc) · 29.1 KB
/
index.html
File metadata and controls
947 lines (555 loc) · 29.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
<!DOCTYPE html>
<html lang="zh-CN" data-default-color-scheme="auto">
<head>
<meta charset="UTF-8">
<link rel="apple-touch-icon" sizes="76x76" href="/img/fluid.png">
<link rel="icon" href="/img/fluid.png">
<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=5.0, shrink-to-fit=no">
<meta http-equiv="x-ua-compatible" content="ie=edge">
<meta name="theme-color" content="#2f4154">
<meta name="author" content="">
<meta name="keywords" content="交通">
<meta name="description" content="个人小站,主要上传个人学习过程中的心得体会">
<meta property="og:type" content="website">
<meta property="og:title" content="Run's Studio">
<meta property="og:url" content="https://runsstudio.github.io/index.html">
<meta property="og:site_name" content="Run's Studio">
<meta property="og:description" content="个人小站,主要上传个人学习过程中的心得体会">
<meta property="og:locale" content="zh_CN">
<meta property="article:tag" content="交通">
<meta name="twitter:card" content="summary_large_image">
<title>Run's Studio</title>
<link rel="stylesheet" href="https://lib.baomitu.com/twitter-bootstrap/4.6.1/css/bootstrap.min.css" />
<!-- Icon fonts the theme depends on; do not modify these links -->
<!-- Do not modify the link that theme dependent icons -->
<!-- Explicit https: replaces protocol-relative URLs so the page also works from file:// and avoids scheme ambiguity -->
<link rel="stylesheet" href="https://at.alicdn.com/t/c/font_1749284_5i9bdhy70f8.css">
<link rel="stylesheet" href="https://at.alicdn.com/t/c/font_1736178_k526ubmyhba.css">
<link rel="stylesheet" href="/css/main.css" />
<link id="highlight-css" rel="stylesheet" href="/css/highlight.css" />
<link id="highlight-css-dark" rel="stylesheet" href="/css/highlight-dark.css" />
<script id="fluid-configs">
// Theme runtime context: merge (not overwrite) so values set by earlier scripts survive.
var Fluid = window.Fluid || {};
Fluid.ctx = Object.assign({}, Fluid.ctx)
// Generated theme configuration — presumably consumed by the theme's events.js / plugins.js / boot.js; verify before editing.
var CONFIG = {"hostname":"runsstudio.github.io","root":"/","version":"1.9.8","typing":{"enable":true,"typeSpeed":70,"cursorChar":"_","loop":false,"scope":[]},"anchorjs":{"enable":true,"element":"h1,h2,h3,h4,h5,h6","placement":"left","visible":"hover","icon":""},"progressbar":{"enable":true,"height_px":3,"color":"#29d","options":{"showSpinner":false,"trickleSpeed":100}},"code_language":{"enable":true,"default":"TEXT"},"copy_btn":true,"image_caption":{"enable":true},"image_zoom":{"enable":true,"img_url_replace":["",""]},"toc":{"enable":true,"placement":"right","headingSelector":"h1,h2,h3,h4,h5,h6","collapseDepth":0},"lazyload":{"enable":true,"loading_img":"/img/loading.gif","onlypost":false,"offset_factor":2},"web_analytics":{"enable":false,"follow_dnt":true,"baidu":null,"google":{"measurement_id":null},"tencent":{"sid":null,"cid":null},"leancloud":{"app_id":null,"app_key":null,"server_url":null,"path":"window.location.pathname","ignore_local":false},"umami":{"src":null,"website_id":null,"domains":null,"start_time":"2024-01-01T00:00:00.000Z","token":null,"api_server":null}},"search_path":"/local-search.xml","include_content_in_search":true};
// Honor "Do Not Track": browsers expose it under different property names and
// report '1', 'yes' or 'on' depending on implementation, so all are checked.
if (CONFIG.web_analytics.follow_dnt) {
var dntVal = navigator.doNotTrack || window.doNotTrack || navigator.msDoNotTrack;
Fluid.ctx.dnt = dntVal && (dntVal.startsWith('1') || dntVal.startsWith('yes') || dntVal.startsWith('on'));
}
</script>
<!-- Loaded synchronously (no defer) — presumably so color-schema.js applies the
     saved light/dark preference before first paint; confirm before adding defer. -->
<script src="/js/utils.js" ></script>
<script src="/js/color-schema.js" ></script>
<meta name="generator" content="Hexo 7.3.0"></head>
<body>
<header>
<div class="header-inner" style="height: 80vh;">
<nav id="navbar" class="navbar fixed-top navbar-expand-lg navbar-dark scrolling-navbar">
<div class="container">
<a class="navbar-brand" href="/">
<strong>Run's blog</strong>
</a>
<button id="navbar-toggler-btn" class="navbar-toggler" type="button" data-toggle="collapse"
data-target="#navbarSupportedContent"
aria-controls="navbarSupportedContent" aria-expanded="false" aria-label="Toggle navigation">
<div class="animated-icon"><span></span><span></span><span></span></div>
</button>
<!-- Collapsible content -->
<div class="collapse navbar-collapse" id="navbarSupportedContent">
<ul class="navbar-nav ml-auto text-center">
<li class="nav-item">
<a class="nav-link" href="/" target="_self">
<i class="iconfont icon-home-fill"></i>
<span>首页</span>
</a>
</li>
<li class="nav-item">
<a class="nav-link" href="/archives/" target="_self">
<i class="iconfont icon-archive-fill"></i>
<span>归档</span>
</a>
</li>
<li class="nav-item">
<a class="nav-link" href="/categories/" target="_self">
<i class="iconfont icon-category-fill"></i>
<span>分类</span>
</a>
</li>
<li class="nav-item">
<a class="nav-link" href="/tags/" target="_self">
<i class="iconfont icon-tags-fill"></i>
<span>标签</span>
</a>
</li>
<li class="nav-item">
<a class="nav-link" href="/about/" target="_self">
<i class="iconfont icon-user-fill"></i>
<span>关于</span>
</a>
</li>
<!-- NOTE(review): href="javascript:;" on the two action links below is an
     anti-pattern (a <button> would be correct), but the theme CSS/JS target
     .nav-link anchors, so the generated markup is kept as is. -->
<li class="nav-item" id="search-btn">
<a class="nav-link" target="_self" href="javascript:;" data-toggle="modal" data-target="#modalSearch" aria-label="Search">
<i class="iconfont icon-search"></i>
</a>
</li>
<li class="nav-item" id="color-toggle-btn">
<a class="nav-link" target="_self" href="javascript:;" aria-label="Color Toggle">
<i class="iconfont icon-dark" id="color-toggle-icon"></i>
</a>
</li>
</ul>
</div>
</div>
</nav>
<!-- Banner; "parallax" is a theme-specific attribute, now quoted for valid HTML
     (getAttribute still returns "true", so theme JS reading it is unaffected) -->
<div id="banner" class="banner" parallax="true"
style="background: url('/img/bg/default.png') no-repeat center center; background-size: cover;">
<div class="full-bg-img">
<div class="mask flex-center" style="background-color: rgba(0, 0, 0, 0.3)">
<div class="banner-text text-center fade-in-up">
<div class="h2">
<span id="subtitle" data-typed-text="Diving into the ocean of knowledge..."></span>
</div>
</div>
<div class="scroll-down-bar">
<i class="iconfont icon-arrowdown"></i>
</div>
</div>
</div>
</div>
</div>
</header>
<main>
<div class="container nopadding-x-md">
<div id="board"
>
<div class="container">
<div class="row">
<div class="col-12 col-md-10 m-auto">
<!-- Post card (2026-03-17); obsolete "pubdate" attribute removed from <time> (dropped from the HTML spec) -->
<div class="row mx-auto index-card">
<article class="col-12 col-md-12 mx-auto index-info">
<h2 class="index-header">
<a href="/2026/03/17/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0%EF%BC%88%E4%B9%9D%EF%BC%89%E7%AD%96%E7%95%A5%E5%87%BD%E6%95%B0%E8%BF%91%E4%BC%BC/" target="_self">
强化学习笔记(九)策略函数近似
</a>
</h2>
<a class="index-excerpt index-excerpt__noimg" href="/2026/03/17/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0%EF%BC%88%E4%B9%9D%EF%BC%89%E7%AD%96%E7%95%A5%E5%87%BD%E6%95%B0%E8%BF%91%E4%BC%BC/" target="_self">
<div>
强化学习笔记(九)策略函数近似 策略函数近似是强化学习中一种重要的方法,用于解决状态空间和动作空间过大或连续的问题。通过函数近似,我们可以用参数化模型来表示策略,从而避免直接存储所有状态和动作的概率分布。 策略函数的输入是状态,输出是动作的概率分布。常见的近似方法包括线性函数近似、神经网络近似和核方法。其中,神经网络近似最为常用,称为策略网络(Policy Network),记为 π(a|s
</div>
</a>
<div class="index-btm post-metas">
<div class="post-meta mr-3">
<i class="iconfont icon-date"></i>
<time datetime="2026-03-17 21:14">
2026-03-17
</time>
</div>
<div class="post-meta mr-3 d-flex align-items-center">
<i class="iconfont icon-category"></i>
<span class="category-chains">
<span class="category-chain">
<a href="/categories/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0/" class="category-chain-item">强化学习</a>
</span>
</span>
</div>
</div>
</article>
</div>
<!-- Post card (2026-02-25); obsolete "pubdate" attribute removed from <time> (dropped from the HTML spec) -->
<div class="row mx-auto index-card">
<article class="col-12 col-md-12 mx-auto index-info">
<h2 class="index-header">
<a href="/2026/02/25/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0%EF%BC%88%E5%85%AB%EF%BC%89%E5%80%BC%E5%87%BD%E6%95%B0%E8%BF%91%E4%BC%BC/" target="_self">
强化学习笔记(八)值函数近似
</a>
</h2>
<a class="index-excerpt index-excerpt__noimg" href="/2026/02/25/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0%EF%BC%88%E5%85%AB%EF%BC%89%E5%80%BC%E5%87%BD%E6%95%B0%E8%BF%91%E4%BC%BC/" target="_self">
<div>
强化学习笔记(八):值函数近似 在实际场景中,我们的状态空间是非常大的,如果我们使用之前的建立一个个表格的方法,他的数据规模也会非常大,这显然不是一个很现实的解决方案。我们希望提出一种新的value function,他可以近似等于真正的value function,但是我们不再需要为每一个state建立一个映射,也就是一种和函数拟合思路类似的方法。在这一章,也是首次将神经网络引入强化学习。
</div>
</a>
<div class="index-btm post-metas">
<div class="post-meta mr-3">
<i class="iconfont icon-date"></i>
<time datetime="2026-02-25 11:16">
2026-02-25
</time>
</div>
<div class="post-meta mr-3 d-flex align-items-center">
<i class="iconfont icon-category"></i>
<span class="category-chains">
<span class="category-chain">
<a href="/categories/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0/" class="category-chain-item">强化学习</a>
</span>
</span>
</div>
</div>
</article>
</div>
<!-- Post card (2026-02-09); obsolete "pubdate" attribute removed from <time> (dropped from the HTML spec) -->
<div class="row mx-auto index-card">
<article class="col-12 col-md-12 mx-auto index-info">
<h2 class="index-header">
<a href="/2026/02/09/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0%EF%BC%88%E4%B8%83%EF%BC%89%E6%97%B6%E5%BA%8F%E5%B7%AE%E5%88%86%E6%96%B9%E6%B3%95/" target="_self">
强化学习笔记(七)时序差分方法
</a>
</h2>
<a class="index-excerpt index-excerpt__noimg" href="/2026/02/09/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0%EF%BC%88%E4%B8%83%EF%BC%89%E6%97%B6%E5%BA%8F%E5%B7%AE%E5%88%86%E6%96%B9%E6%B3%95/" target="_self">
<div>
时序差分算法概述 时序差分(Temporal-Difference, TD)方法是强化学习中最核心的无模型学习技术之一,它结合了蒙特卡洛方法的采样能力与动态规划的自举思想,能够在无需环境模型的情况下进行在线、增量式更新。 核心思想是利用当前状态的价值估计与下一状态的价值估计之间的差异(TD误差)来更新价值函数。包括状态值的td学习、动作值的td学习、最优动作值估计的TD学习 时序差分算法的
</div>
</a>
<div class="index-btm post-metas">
<div class="post-meta mr-3">
<i class="iconfont icon-date"></i>
<time datetime="2026-02-09 22:16">
2026-02-09
</time>
</div>
<div class="post-meta mr-3 d-flex align-items-center">
<i class="iconfont icon-category"></i>
<span class="category-chains">
<span class="category-chain">
<a href="/categories/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0/" class="category-chain-item">强化学习</a>
</span>
</span>
</div>
</div>
</article>
</div>
<!-- Post card (2026-01-28); obsolete "pubdate" attribute removed from <time> (dropped from the HTML spec) -->
<div class="row mx-auto index-card">
<article class="col-12 col-md-12 mx-auto index-info">
<h2 class="index-header">
<a href="/2026/01/28/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0%EF%BC%88%E5%85%AD%EF%BC%89%E9%9A%8F%E6%9C%BA%E8%BF%91%E4%BC%BC%E7%90%86%E8%AE%BA%E4%B8%8E%E9%9A%8F%E6%9C%BA%E6%A2%AF%E5%BA%A6%E4%B8%8B%E9%99%8D%E6%96%B9%E6%B3%95/" target="_self">
强化学习笔记(六)随机近似理论与随机梯度下降方法
</a>
</h2>
<a class="index-excerpt index-excerpt__noimg" href="/2026/01/28/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0%EF%BC%88%E5%85%AD%EF%BC%89%E9%9A%8F%E6%9C%BA%E8%BF%91%E4%BC%BC%E7%90%86%E8%AE%BA%E4%B8%8E%E9%9A%8F%E6%9C%BA%E6%A2%AF%E5%BA%A6%E4%B8%8B%E9%99%8D%E6%96%B9%E6%B3%95/" target="_self">
<div>
动机 回顾一下期望的定义,为什么我们要计算期望?是为了取平均。 为什么要计算期望?因为强化学习的本质就是求期望,求状态下能够获得的回报的期望、动作获得的未来回报的期望 求期望有两种方式: ①全量求期望,收集所有的样本然后求平均 ②增量的方式求期望。 增量的方式求期望,如图所示,是可以通过推导,得到wk + 1和wk之间的关系的。通过迭代最下面的式子可以实现来一个新增数据得一个期望
</div>
</a>
<div class="index-btm post-metas">
<div class="post-meta mr-3">
<i class="iconfont icon-date"></i>
<time datetime="2026-01-28 21:35">
2026-01-28
</time>
</div>
<div class="post-meta mr-3 d-flex align-items-center">
<i class="iconfont icon-category"></i>
<span class="category-chains">
<span class="category-chain">
<a href="/categories/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0/" class="category-chain-item">强化学习</a>
</span>
</span>
</div>
</div>
</article>
</div>
<!-- Post card (2026-01-21); obsolete "pubdate" attribute removed from <time> (dropped from the HTML spec) -->
<div class="row mx-auto index-card">
<article class="col-12 col-md-12 mx-auto index-info">
<h2 class="index-header">
<a href="/2026/01/21/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0%EF%BC%88%E4%BA%94%EF%BC%89%E8%92%99%E7%89%B9%E5%8D%A1%E6%B4%9B%E6%96%B9%E6%B3%95/" target="_self">
强化学习笔记(五)蒙特卡洛算法
</a>
</h2>
<a class="index-excerpt index-excerpt__noimg" href="/2026/01/21/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0%EF%BC%88%E4%BA%94%EF%BC%89%E8%92%99%E7%89%B9%E5%8D%A1%E6%B4%9B%E6%96%B9%E6%B3%95/" target="_self">
<div>
强化学习笔记(五)蒙特卡洛算法 第四章描述的值迭代算法和策略迭代算法都是model-based的算法。 从这一章开始,我们引入蒙特卡洛学习,这是一种不需要模型的算法。也正是对应了强化学习的话:没有模型就得有数据,没有数据就得有模型。 蒙特卡洛方法来源于数学中的统计方法,比如说抛硬币问题,算抛硬币中正面的概率,用model based的方法就是直接算期望,直接知道p正=0.5,p反=0.5。
</div>
</a>
<div class="index-btm post-metas">
<div class="post-meta mr-3">
<i class="iconfont icon-date"></i>
<time datetime="2026-01-21 21:06">
2026-01-21
</time>
</div>
<div class="post-meta mr-3 d-flex align-items-center">
<i class="iconfont icon-category"></i>
<span class="category-chains">
<span class="category-chain">
<a href="/categories/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0/" class="category-chain-item">强化学习</a>
</span>
</span>
</div>
</div>
</article>
</div>
<!-- Post card (2026-01-21); obsolete "pubdate" attribute removed from <time> (dropped from the HTML spec) -->
<div class="row mx-auto index-card">
<article class="col-12 col-md-12 mx-auto index-info">
<h2 class="index-header">
<a href="/2026/01/21/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0%EF%BC%88%E5%9B%9B%EF%BC%89%E5%80%BC%E8%BF%AD%E4%BB%A3%E5%92%8C%E7%AD%96%E7%95%A5%E8%BF%AD%E4%BB%A3/" target="_self">
强化学习笔记(四)值迭代和策略迭代
</a>
</h2>
<a class="index-excerpt index-excerpt__noimg" href="/2026/01/21/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0%EF%BC%88%E5%9B%9B%EF%BC%89%E5%80%BC%E8%BF%AD%E4%BB%A3%E5%92%8C%E7%AD%96%E7%95%A5%E8%BF%AD%E4%BB%A3/" target="_self">
<div>
一、值迭代算法 如何求解贝尔曼等式?在上一章中,已经知道了求解贝尔曼公式核心是求解一个f(v)=v的不动点问题,通过contraction mapping 定理可知,可以使用迭代的方式求解。 贝尔曼公式中蕴含着π,π和v是绑定的。因此,需要通过两个步骤去求解。 第一步【优化策略(policy update)】:是在给定vk的情况下求解πk+1 第二步【优化值(value upda
</div>
</a>
<div class="index-btm post-metas">
<div class="post-meta mr-3">
<i class="iconfont icon-date"></i>
<time datetime="2026-01-21 20:25">
2026-01-21
</time>
</div>
<div class="post-meta mr-3 d-flex align-items-center">
<i class="iconfont icon-category"></i>
<span class="category-chains">
<span class="category-chain">
<a href="/categories/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0/" class="category-chain-item">强化学习</a>
</span>
</span>
</div>
</div>
</article>
</div>
<!-- Post card (2026-01-19); obsolete "pubdate" attribute removed from <time> (dropped from the HTML spec) -->
<div class="row mx-auto index-card">
<article class="col-12 col-md-12 mx-auto index-info">
<h2 class="index-header">
<a href="/2026/01/19/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0%EF%BC%88%E4%B8%89%EF%BC%89%E8%B4%9D%E5%B0%94%E6%9B%BC%E6%9C%80%E4%BC%98%E6%96%B9%E7%A8%8B/" target="_self">
强化学习笔记(三)贝尔曼最优方程
</a>
</h2>
<a class="index-excerpt index-excerpt__noimg" href="/2026/01/19/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0%EF%BC%88%E4%B8%89%EF%BC%89%E8%B4%9D%E5%B0%94%E6%9B%BC%E6%9C%80%E4%BC%98%E6%96%B9%E7%A8%8B/" target="_self">
<div>
1. 动机 动作的价值对我们来说很重要,动作的价值是与当前自己要执行的动作的概率无关的,我们从grid model案例中可以得到:qπ(s, a) = r + γv(s′),当前的动作是好与坏可以通过当前的动作的价值Q得到。 那么很明显,当前最好的动作就是q值最大的动作,也就是a* = argmaxaqπ(s, a)。 如果强化学习模型没有训练,策略是随机的,那么价值Q就不一定能够合理评估
</div>
</a>
<div class="index-btm post-metas">
<div class="post-meta mr-3">
<i class="iconfont icon-date"></i>
<time datetime="2026-01-19 21:02">
2026-01-19
</time>
</div>
<div class="post-meta mr-3 d-flex align-items-center">
<i class="iconfont icon-category"></i>
<span class="category-chains">
<span class="category-chain">
<a href="/categories/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0/" class="category-chain-item">强化学习</a>
</span>
</span>
</div>
</div>
</article>
</div>
<!-- Post card (2026-01-05); obsolete "pubdate" attribute removed from <time> (dropped from the HTML spec) -->
<div class="row mx-auto index-card">
<article class="col-12 col-md-12 mx-auto index-info">
<h2 class="index-header">
<a href="/2026/01/05/%E5%9C%A8Windows%E4%B8%8A%E5%BF%AB%E9%80%9F%E8%BF%90%E8%A1%8C%E5%B9%B6%E8%B0%83%E8%AF%95LLMLight/" target="_self">
轻松在Windows上配置并运行LLMLight项目
</a>
</h2>
<a class="index-excerpt index-excerpt__noimg" href="/2026/01/05/%E5%9C%A8Windows%E4%B8%8A%E5%BF%AB%E9%80%9F%E8%BF%90%E8%A1%8C%E5%B9%B6%E8%B0%83%E8%AF%95LLMLight/" target="_self">
<div>
轻松在Windows上配置并运行LLMLight项目 1. Introduction 近年来,基于强化学习的信号控制逐步迭代,已经发展出了基于大语言模型的LLMLight。LLMLight是一种将大型语言模型(LLM)作为TSC决策代理的新框架。传统的TSC方法主要基于交通工程和强化学习(RL),在不同交通场景下的泛化能力通常有限,并且缺乏可解释性。该框架首先通过提供包含实时交通状况的知识性提
</div>
</a>
<div class="index-btm post-metas">
<div class="post-meta mr-3">
<i class="iconfont icon-date"></i>
<time datetime="2026-01-05 21:09">
2026-01-05
</time>
</div>
<div class="post-meta mr-3 d-flex align-items-center">
<i class="iconfont icon-category"></i>
<span class="category-chains">
<span class="category-chain">
<a href="/categories/%E6%93%8D%E4%BD%9C%E7%B3%BB%E7%BB%9F/" class="category-chain-item">操作系统</a>
</span>
</span>
</div>
</div>
</article>
</div>
<!-- Post card (2026-01-04); obsolete "pubdate" attribute removed from <time> (dropped from the HTML spec) -->
<div class="row mx-auto index-card">
<article class="col-12 col-md-12 mx-auto index-info">
<h2 class="index-header">
<a href="/2026/01/04/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0%EF%BC%88%E4%B8%80%EF%BC%89%E5%9F%BA%E7%A1%80%E6%A6%82%E5%BF%B5/" target="_self">
强化学习笔记(一)基础概念
</a>
</h2>
<a class="index-excerpt index-excerpt__noimg" href="/2026/01/04/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0%EF%BC%88%E4%B8%80%EF%BC%89%E5%9F%BA%E7%A1%80%E6%A6%82%E5%BF%B5/" target="_self">
<div>
强化学习笔记(一)基础概念 本笔记是b站的 西湖大学强化学习课程 的笔记。并附带一些个人的思考。 # 一、基础概念 强化学习各章节 基础概念 贝尔曼方程 贝尔曼最优方程 值迭代&策略迭代 蒙特卡洛学习 随机估计 时序差分学习 值函数估计 策略梯度方法 Actor-Critic 方法 章节关系 章节1-3 强化学习的基础工具 章节4-10 强化学习的算法
</div>
</a>
<div class="index-btm post-metas">
<div class="post-meta mr-3">
<i class="iconfont icon-date"></i>
<time datetime="2026-01-04 20:10">
2026-01-04
</time>
</div>
<div class="post-meta mr-3 d-flex align-items-center">
<i class="iconfont icon-category"></i>
<span class="category-chains">
<span class="category-chain">
<a href="/categories/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0/" class="category-chain-item">强化学习</a>
</span>
</span>
</div>
</div>
</article>
</div>
<!-- Post card (2026-01-04); obsolete "pubdate" attribute removed from <time> (dropped from the HTML spec) -->
<div class="row mx-auto index-card">
<article class="col-12 col-md-12 mx-auto index-info">
<h2 class="index-header">
<a href="/2026/01/04/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0%EF%BC%88%E4%BA%8C%EF%BC%89%E8%B4%9D%E5%B0%94%E6%9B%BC%E6%96%B9%E7%A8%8B/" target="_self">
强化学习笔记(二)贝尔曼方程
</a>
</h2>
<a class="index-excerpt index-excerpt__noimg" href="/2026/01/04/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0%E7%AC%94%E8%AE%B0%EF%BC%88%E4%BA%8C%EF%BC%89%E8%B4%9D%E5%B0%94%E6%9B%BC%E6%96%B9%E7%A8%8B/" target="_self">
<div>
策略评估的方式 如何不断的改进策略?用什么来评估:答案是基于return进行评估。 return是一条控制轨迹能够获得的奖励或折扣奖励的和,能够用来评估当前状态的价值。 状态的价值 举个例子,在Grid机器人里,现在有3条路径: 路径1:智能体从S1到S3,奖励是0,从S3到S4,奖励是1,呆在S4,一直有奖励1. 路径2:智能体从S1到S2,奖励是-1,从S2到S4,奖励是1,呆在
</div>
</a>
<div class="index-btm post-metas">
<div class="post-meta mr-3">
<i class="iconfont icon-date"></i>
<time datetime="2026-01-04 20:10">
2026-01-04
</time>
</div>
<div class="post-meta mr-3 d-flex align-items-center">
<i class="iconfont icon-category"></i>
<span class="category-chains">
<span class="category-chain">
<a href="/categories/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0/" class="category-chain-item">强化学习</a>
</span>
</span>
</div>
</div>
</article>
</div>
<!-- Page navigation; labeled "Pagination" so assistive tech can tell this
     landmark apart from the primary navbar <nav> -->
<nav aria-label="Pagination">
<span class="pagination" id="pagination">
<span class="page-number current">1</span><a class="page-number" href="/page/2/#board">2</a><a class="page-number" href="/page/3/#board">3</a><a class="extend next" rel="next" href="/page/2/#board"><i class="iconfont icon-arrowright"></i></a>
</span>
</nav>
</div>
</div>
</div>
</div>
</div>
<!-- Back-to-top control; href="#" is presumably intercepted by theme JS
     (events.js) for smooth scrolling — confirm before changing to a <button> -->
<a id="scroll-top-button" aria-label="TOP" href="#" role="button">
<i class="iconfont icon-arrowup" aria-hidden="true"></i>
</a>
<!-- Local-search modal; the title now carries id="ModalLabel" so the dialog's
     aria-labelledby reference resolves (it previously pointed at no element) -->
<div class="modal fade" id="modalSearch" tabindex="-1" role="dialog" aria-labelledby="ModalLabel"
aria-hidden="true">
<div class="modal-dialog modal-dialog-scrollable modal-lg" role="document">
<div class="modal-content">
<div class="modal-header text-center">
<h4 class="modal-title w-100 font-weight-bold" id="ModalLabel">搜索</h4>
<button type="button" id="local-search-close" class="close" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body mx-3">
<div class="md-form mb-5">
<input type="text" id="local-search-input" class="form-control validate">
<label data-error="x" data-success="v" for="local-search-input">关键词</label>
</div>
<div class="list-group" id="local-search-result"></div>
</div>
</div>
</div>
</div>
</main>
<footer>
<div class="footer-inner">
<div class="footer-content">
<!-- Site credit and theme attribution; rel="nofollow noopener" pairs with target="_blank" -->
<a href="https://runsstudio.github.io" target="_blank" rel="nofollow noopener"><span>Run's studio © 2025</span></a> <i class="iconfont icon-love"></i> <a href="https://github.com/fluid-dev/hexo-theme-fluid" target="_blank" rel="nofollow noopener"><span>Fluid</span></a>
</div>
</div>
</footer>
<!-- Scripts -->
<!-- NProgress library must load before the inline init script that follows it -->
<script src="https://lib.baomitu.com/nprogress/0.2.0/nprogress.min.js" ></script>
<link rel="stylesheet" href="https://lib.baomitu.com/nprogress/0.2.0/nprogress.min.css" />
<script>
// Page-load progress bar: begin immediately, complete on the window load event.
NProgress.configure({ showSpinner: false, trickleSpeed: 100 });
NProgress.start();
window.addEventListener('load', function () {
  NProgress.done();
});
</script>
<!-- jQuery + Bootstrap drive the data-toggle collapse/modal markup above;
     typed.js backs the banner subtitle animation initialized below -->
<script src="https://lib.baomitu.com/jquery/3.6.4/jquery.min.js" ></script>
<script src="https://lib.baomitu.com/twitter-bootstrap/4.6.1/js/bootstrap.min.js" ></script>
<script src="/js/events.js" ></script>
<script src="/js/plugins.js" ></script>
<script src="https://lib.baomitu.com/typed.js/2.0.12/typed.min.js" ></script>
<script>
(function (window, document) {
// Banner subtitle typing animation.
// Guard every level of the lookup: the original read Fluid.plugins.typing
// directly and would throw a TypeError if plugins.js failed to load and
// left Fluid.plugins undefined; now it degrades to a silent no-op.
var plugins = window.Fluid && window.Fluid.plugins;
var typing = plugins && plugins.typing;
var subtitle = document.getElementById('subtitle');
if (!subtitle || !typing) {
return;
}
var text = subtitle.getAttribute('data-typed-text');
typing(text);
})(window, document);
</script>
<script src="/js/img-lazyload.js" ></script>
<script src="/js/local-search.js" ></script>
<!-- Theme bootstrap: keep it at the very bottom -->
<!-- the boot of the theme, keep it at the bottom -->
<script src="/js/boot.js" ></script>
<noscript>
<div class="noscript-warning">博客在允许 JavaScript 运行的环境下浏览效果更佳</div>
</noscript>
</body>
</html>