Linux内存控制器(一)
1. memory_cgrp_subsys
// cftype: 用于定义和描述控制组的控制文件
// cftype->private:描述资源类型和资源属性
// dfl_cftypes和legacy_cftypes都是cftype的成员
struct cgroup_subsys memory_cgrp_subsys = {.css_alloc = mem_cgroup_css_alloc,.css_online = mem_cgroup_css_online,.css_offline = mem_cgroup_css_offline,.css_released = mem_cgroup_css_released,.css_free = mem_cgroup_css_free,.css_reset = mem_cgroup_css_reset,.can_attach = mem_cgroup_can_attach,.cancel_attach = mem_cgroup_cancel_attach,.post_attach = mem_cgroup_move_task,.bind = mem_cgroup_bind,// 默认层级.dfl_cftypes = memory_files,// 子层级.legacy_cftypes = mem_cgroup_legacy_files,.early_init = 0,
};
2. dfl_cftypes
static struct cftype memory_files[] = {{// 控制组和所有子控制组的当前内存使用量.name = "current",.flags = CFTYPE_NOT_ON_ROOT,.read_u64 = memory_current_read,},{// 内存使用低界限.name = "low",.flags = CFTYPE_NOT_ON_ROOT,.seq_show = memory_low_show,.write = memory_low_write,},{// 内存使用高界限.name = "high",.flags = CFTYPE_NOT_ON_ROOT,.seq_show = memory_high_show,.write = memory_high_write,},{// 内存使用硬限制.name = "max",.flags = CFTYPE_NOT_ON_ROOT,.seq_show = memory_max_show,.write = memory_max_write,},{// 内存事件.name = "events",.flags = CFTYPE_NOT_ON_ROOT,.file_offset = offsetof(struct mem_cgroup, events_file),.seq_show = memory_events_show,},{// 查看内存使用的各种统计值.name = "stat",.flags = CFTYPE_NOT_ON_ROOT,.seq_show = memory_stat_show,},{ } /* terminate */
};static struct cftype swap_files[] = {{// 控制组和所有子控制组当前交换分区使用量.name = "swap.current",.flags = CFTYPE_NOT_ON_ROOT,.read_u64 = swap_current_read,},{// 交换分区使用硬限制.name = "swap.max",.flags = CFTYPE_NOT_ON_ROOT,.seq_show = swap_max_show,.write = swap_max_write,},{ } /* terminate */
};
3. legacy_cftypes
// 根控制组对资源使用量没有限制,并且不允许在根控制组配置资源使用限制
// 进程默认属于根控制组,创建子进程时,子进程继承父进程加入的控制组
static struct cftype mem_cgroup_legacy_files[] = {{// 当前内存使用量.name = "usage_in_bytes",.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),// 见第6节.read_u64 = mem_cgroup_read_u64,},{// 记录的最大内存使用量.name = "max_usage_in_bytes",.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),// 见第8节.write = mem_cgroup_reset,.read_u64 = mem_cgroup_read_u64,},{// 内存使用硬限制.name = "limit_in_bytes",.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),// 见第7节.write = mem_cgroup_write,.read_u64 = mem_cgroup_read_u64,},{// 内存使用软限制.name = "soft_limit_in_bytes",.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),.write = mem_cgroup_write,.read_u64 = mem_cgroup_read_u64,},{.name = "failcnt",.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),.write = mem_cgroup_reset,.read_u64 = mem_cgroup_read_u64,},{// 内存使用统计值.name = "stat",.seq_show = memcg_stat_show,},{.name = "force_empty",.write = mem_cgroup_force_empty_write,},{// 使用分层记账: 启用后子树中的所有内存控制组的内存使用都会被记账到这个内存控制组.name = "use_hierarchy",.write_u64 = mem_cgroup_hierarchy_write,.read_u64 = mem_cgroup_hierarchy_read,},{// 注册内存监控事件.name = "cgroup.event_control", /* XXX: for compat */.write = memcg_write_event_control,.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,},{.name = "swappiness",.read_u64 = mem_cgroup_swappiness_read,.write_u64 = mem_cgroup_swappiness_write,},{.name = "move_charge_at_immigrate",.read_u64 = mem_cgroup_move_charge_read,.write_u64 = mem_cgroup_move_charge_write,},{// 是否禁止oom killer杀进程.name = "oom_control",.seq_show = mem_cgroup_oom_control_read,.write_u64 = mem_cgroup_oom_control_write,.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),},{// 内存压力级别.name = "pressure_level",},
#ifdef CONFIG_NUMA{.name = "numa_stat",.seq_show = memcg_numa_stat_show,},
#endif{// 内核内存使用限制.name = "kmem.limit_in_bytes",.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),.write = mem_cgroup_write,.read_u64 = mem_cgroup_read_u64,},{// 内核内存使用量.name = "kmem.usage_in_bytes",.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),.read_u64 = mem_cgroup_read_u64,},{.name = "kmem.failcnt",.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),.write = mem_cgroup_reset,.read_u64 = mem_cgroup_read_u64,},{// 记录的最大内核内存使用使用量.name = "kmem.max_usage_in_bytes",.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),.write = mem_cgroup_reset,.read_u64 = mem_cgroup_read_u64,},
#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG){.name = "kmem.slabinfo",.seq_start = memcg_slab_start,.seq_next = memcg_slab_next,.seq_stop = memcg_slab_stop,.seq_show = memcg_slab_show,},
#endif{// tcp缓冲区内存使用限制.name = "kmem.tcp.limit_in_bytes",.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),.write = mem_cgroup_write,.read_u64 = mem_cgroup_read_u64,},{// tcp缓冲区内存使用量.name = "kmem.tcp.usage_in_bytes",.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),.read_u64 = mem_cgroup_read_u64,},{.name = "kmem.tcp.failcnt",.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),.write = mem_cgroup_reset,.read_u64 = mem_cgroup_read_u64,},{// 记录的最大tcp缓冲区内存使用量.name = "kmem.tcp.max_usage_in_bytes",.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),.write = mem_cgroup_reset,.read_u64 = mem_cgroup_read_u64,},{ }, /* terminate */
};static struct cftype memsw_cgroup_files[] = {{// 内存+交换分区内存使用量.name = "memsw.usage_in_bytes",.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),.read_u64 = mem_cgroup_read_u64,},{// 记录的内存+交换分区最大内存使用量.name = "memsw.max_usage_in_bytes",.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),.write = mem_cgroup_reset,.read_u64 = mem_cgroup_read_u64,},{// 记录的内存+交换分区内存使用量.name = "memsw.limit_in_bytes",.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),.write = mem_cgroup_write,.read_u64 = mem_cgroup_read_u64,},{.name = "memsw.failcnt",.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),.write = mem_cgroup_reset,.read_u64 = mem_cgroup_read_u64,},{ }, /* terminate */
};
4. mem_cgroup
struct mem_cgroup {// 所有资源控制器的基类struct cgroup_subsys_state css;/* Private memcg ID. Used to ID objects that outlive the cgroup */struct mem_cgroup_id id;/* Accounted resources */// _MEM类型的内存计数器: 记录内存的限制和当前使用量[见5.1节]struct page_counter memory;struct page_counter swap;/* Legacy consumer-oriented counters */// _MEMSWAP类型的内存计数器: 记录内存+交换分区的限制和当前使用量struct page_counter memsw;// _KMEM类型的内核内存计数器: 记录内核内存的限制和当前使用量struct page_counter kmem;// _TCP类型的tcp缓冲区计数器: 记录tcp缓冲区的限制和当前使用量struct page_counter tcpmem;/* Normal memory consumption range */// 内存使用低界限unsigned long low;// 内存使用高界限unsigned long high;/* Range enforcement for interrupt charges */struct work_struct high_work;// 内存使用软限制unsigned long soft_limit;/* vmpressure notifications */struct vmpressure vmpressure;/** Should the accounting and control be hierarchical, per subtree?*/// 是否使用分层记账bool use_hierarchy;/* protected by memcg_oom_lock */bool oom_lock;int under_oom;int swappiness;/* OOM-Killer disable */int oom_kill_disable;/* handle for "memory.events" */struct cgroup_file events_file;/* protect arrays of thresholds */struct mutex thresholds_lock;/* thresholds for memory usage. RCU-protected */struct mem_cgroup_thresholds thresholds;/* thresholds for mem+swap usage. RCU-protected */struct mem_cgroup_thresholds memsw_thresholds;/* For oom notifier event fd */struct list_head oom_notify;/** Should we move charges of a task when a task is moved into this* mem_cgroup ? 
And what type of charges should we move ?*/unsigned long move_charge_at_immigrate;/** set > 0 if pages under this cgroup are moving to other cgroup.*/atomic_t moving_account;/* taken only while moving_account > 0 */spinlock_t move_lock;struct task_struct *move_lock_task;unsigned long move_lock_flags;/** percpu counter.*/// 每cpu变量: 统计内存控制组状态(包括内存使用量和内存事件)[见5.3节]struct mem_cgroup_stat_cpu __percpu *stat;unsigned long socket_pressure;/* Legacy tcp memory accounting */bool tcpmem_active;int tcpmem_pressure;#ifndef CONFIG_SLOB/* Index in the kmem_cache->memcg_params.memcg_caches array */int kmemcg_id;enum memcg_kmem_state kmem_state;struct list_head kmem_caches;
#endifint last_scanned_node;
#if MAX_NUMNODES > 1nodemask_t scan_nodes;atomic_t numainfo_events;atomic_t numainfo_updating;
#endif#ifdef CONFIG_CGROUP_WRITEBACKstruct list_head cgwb_list;struct wb_domain cgwb_domain;
#endif/* List of events which userspace want to receive */struct list_head event_list;spinlock_t event_list_lock;// 每个节点对应一个mem_cgroup_per_node实例[见5.2节]struct mem_cgroup_per_node *nodeinfo[0];/* WARNING: nodeinfo must be the last member here */
};
4.1 page_counter
// 页面计数器
struct page_counter {// 计数值atomic_long_t count;// 硬限制unsigned long limit;// 如果父控制组使用use_hierarchy, 则parent指向父控制组的页面计数器, 否则时空指针struct page_counter *parent;/* legacy */// 记录计数值的历史最大值unsigned long watermark;// 命中限制的次数unsigned long failcnt;
};
4.2 mem_cgroup_per_node
/** per-zone information in memory controller.*/
struct mem_cgroup_per_node {// 内存控制组私有的lru链表// 当进程加入内存控制组后, 给进程分配的页面不再加入node的lru链表, 而是加入内存控制组私有的lru链表struct lruvec lruvec;struct lruvec_stat __percpu *lruvec_stat;unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];struct rb_node tree_node; /* RB tree node */// 内存使用量超过软限制的数值 = mem_cgroup.memory.count - mem_cgroup.soft_limitunsigned long usage_in_excess;/* Set to the value by which *//* the soft limit is exceeded*/// 表示内存控制组是否在软限制树种// 当内存使用量超过软限制时, 通过成员tree_node把mem_cgroup_per_node实例加入软限制树bool on_tree;// 指向mem_cgroup_per_node实例所属的内存控制组struct mem_cgroup *memcg; /* Back pointer, we cannot *//* use container_of */
};
4.3 mem_cgroup_stat_cpu
struct mem_cgroup_stat_cpu {// 统计控制组内不同状态page的使用量[见5.4节]long count[MEMCG_NR_STAT];// 统计控制组内发生的不同类型事件的次数[见5.5节]unsigned long events[MEMCG_NR_EVENTS];unsigned long nr_page_events;// 统计控制组不同目标发生事件的次数[见5.6节]unsigned long targets[MEM_CGROUP_NTARGETS];
};
4.4 memcg_stat_item
// cgroup自定义的page状态
enum memcg_stat_item {// 文件缓存MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,// 匿名内存MEMCG_RSS,// 匿名巨页MEMCG_RSS_HUGE,// swap缓存MEMCG_SWAP,MEMCG_SOCK,/* XXX: why are these zone and not node counters? */MEMCG_KERNEL_STACK_KB,MEMCG_NR_STAT,
};
4.5 memcg_event_item
/* Cgroup-specific events, on top of universal VM events */
enum memcg_event_item {
	MEMCG_LOW = NR_VM_EVENT_ITEMS,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_NR_EVENTS,
};
4.6 mem_cgroup_events_target
/* Sampling targets for periodic memcg event checks. */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};
5. mem_cgroup_read_u64
// 资源类型
enum res_type {_MEM,_MEMSWAP,_OOM_TYPE,_KMEM,_TCP,
};// 资源属性
enum {RES_USAGE,RES_LIMIT,RES_MAX_USAGE,RES_FAILCNT,RES_SOFT_LIMIT,
};static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,struct cftype *cft)
{struct mem_cgroup *memcg = mem_cgroup_from_css(css);struct page_counter *counter;// 解析资源类型, 从mem_cgroup中选择对应的页面计数器switch (MEMFILE_TYPE(cft->private)) {case _MEM:counter = &memcg->memory;break;case _MEMSWAP:counter = &memcg->memsw;break;case _KMEM:counter = &memcg->kmem;break;case _TCP:counter = &memcg->tcpmem;break;default:BUG();}// 解析资源属性switch (MEMFILE_ATTR(cft->private)) {case RES_USAGE:// 读取usage_in_bytes数据[见6.1节]if (counter == &memcg->memory)return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;// 读取memsw.usage_in_bytes数据if (counter == &memcg->memsw)return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;return (u64)page_counter_read(counter) * PAGE_SIZE;case RES_LIMIT:// 读取*.limit_in_bytes即内存使用限制值return (u64)counter->limit * PAGE_SIZE;case RES_MAX_USAGE:// 读取*.max_usage_in_bytes即历史最大内存使用量return (u64)counter->watermark * PAGE_SIZE;case RES_FAILCNT:// 读取*.failcnt数据return counter->failcnt;case RES_SOFT_LIMIT:// 读取软限制值return (u64)memcg->soft_limit * PAGE_SIZE;default:BUG();}
}
5.1 mem_cgroup_usage
/*
 * Return the group's memory usage in pages; with @swap true, include
 * swap (the _MEMSWAP view).  The root group keeps no counter of its
 * own, so its usage is summed from the per-cpu stats of the subtree.
 */
static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
{
	unsigned long val = 0;

	if (mem_cgroup_is_root(memcg)) {
		struct mem_cgroup *iter;

		/* walk every memcg under the root and sum its pages */
		for_each_mem_cgroup_tree(iter, memcg) {
			/* page cache pages */
			val += memcg_page_state(iter, MEMCG_CACHE);
			/* anonymous pages */
			val += memcg_page_state(iter, MEMCG_RSS);
			/* include swap cache pages if requested */
			if (swap)
				val += memcg_page_state(iter, MEMCG_SWAP);
		}
	} else {
		/* non-root: read straight from the page counter */
		if (!swap)
			val = page_counter_read(&memcg->memory);
		else
			val = page_counter_read(&memcg->memsw);
	}

	return val;
}
5.2 memcg_page_state
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,int idx)
{// 遍历每个cpu上的mem_cgroup_stat_cpu, 并统计该控制组内由idx指定状态的页面数量for_each_possible_cpu(cpu)val += per_cpu(memcg->stat->count[idx], cpu);if (val < 0)val = 0;return val;
}
6. mem_cgroup_write
/*
 * Write handler for the legacy limit files: parse the user buffer into
 * a page count and apply it as a hard or soft limit.
 */
static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	/* resolve the cgroup behind this control file */
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long nr_pages;
	int ret;

	buf = strstrip(buf);
	/*
	 * Parse the written value (may carry a K/M/G/T/P/E suffix) into
	 * a number of pages; "-1" means unlimited.
	 */
	ret = page_counter_memparse(buf, "-1", &nr_pages);
	if (ret)
		return ret;

	/* which attribute is being set? */
	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_LIMIT:
		/* the root group may not have limits configured */
		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
			ret = -EINVAL;
			break;
		}
		/* dispatch on the resource type the file belongs to */
		switch (MEMFILE_TYPE(of_cft(of)->private)) {
		case _MEM:
			/* set the memory hard limit */
			ret = mem_cgroup_resize_limit(memcg, nr_pages);
			break;
		case _MEMSWAP:
			ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
			break;
		case _KMEM:
			ret = memcg_update_kmem_limit(memcg, nr_pages);
			break;
		case _TCP:
			ret = memcg_update_tcp_limit(memcg, nr_pages);
			break;
		}
		break;
	case RES_SOFT_LIMIT:
		/* soft limit: just record the value */
		memcg->soft_limit = nr_pages;
		ret = 0;
		break;
	}

	return ret ?: nbytes;
}
6.1 mem_cgroup_resize_limit
static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,unsigned long limit)
{unsigned long curusage;unsigned long oldusage;bool enlarge = false;int retry_count;int ret;/** For keeping hierarchical_reclaim simple, how long we should retry* is depends on callers. We set our retry-count to be function* of # of children which we should visit in this loop.*/retry_count = MEM_CGROUP_RECLAIM_RETRIES *mem_cgroup_count_children(memcg);// 返回当前内存使用硬限制oldusage = page_counter_read(&memcg->memory);do {if (signal_pending(current)) {ret = -EINTR;break;}mutex_lock(&memcg_limit_mutex);// 新的限制不能超过内存+交换分区的限制if (limit > memcg->memsw.limit) {mutex_unlock(&memcg_limit_mutex);ret = -EINVAL;break;}// 增大硬限制if (limit > memcg->memory.limit)enlarge = true;// 更新硬限制ret = page_counter_limit(&memcg->memory, limit);mutex_unlock(&memcg_limit_mutex);// 更新成功则跳过跳出循环if (!ret)break;// 否则代表当前使用量已经超过硬限制, 需要针对该控制组进行内存回收try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);// 回收完之后再次读取内存使用量curusage = page_counter_read(&memcg->memory);/* Usage is reduced ? */// 如果内存使用量仍然比之前大, 则进行重试if (curusage >= oldusage)retry_count--;else// 否则进行重试, 直到使用量小于硬限制oldusage = curusage;} while (retry_count);if (!ret && enlarge)memcg_oom_recover(memcg);return ret;
}
7. mem_cgroup_reset
/*
 * Write handler for the legacy reset files (*.max_usage_in_bytes and
 * *.failcnt): any write resets the corresponding counter field.
 */
static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
				size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	struct page_counter *counter;

	/* pick the page counter matching the encoded resource type */
	switch (MEMFILE_TYPE(of_cft(of)->private)) {
	case _MEM:
		counter = &memcg->memory;
		break;
	case _MEMSWAP:
		counter = &memcg->memsw;
		break;
	case _KMEM:
		counter = &memcg->kmem;
		break;
	case _TCP:
		counter = &memcg->tcpmem;
		break;
	default:
		BUG();
	}

	/* which attribute is being reset? */
	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		/* reset the recorded maximum to the current usage */
		page_counter_reset_watermark(counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	default:
		BUG();
	}

	return nbytes;
}
7.1 page_counter_reset_watermark
static inline void page_counter_reset_watermark(struct page_counter *counter)
{// 将当前内存使用量更新为历史最大值counter->watermark = page_counter_read(counter);
}
Linux内存控制器(一)相关推荐
- Linux内存控制器(二)
1. memcg_stock_pcp // 每处理器记账缓存一次从内存控制组批量申请32页, 然后把内存控制组的内存使用量加上32页 #define CHARGE_BATCH 32U // 在内存控制 ...
- Linux内存技术分析(上)
Linux内存技术分析(上) 一.Linux存储器 限于存储介质的存取速率和成本,现代计算机的存储结构呈现为金字塔型.越往塔顶,存取效率越高.但成本也越高,所以容量也就越小.得益于程序访问的局部性原理 ...
- 嵌入式Linux内存压力测试
原文参考:添加链接描述 1 前言 内存是电子计算机的最重要组成要素之一. 与内存对应的就是外存,如硬盘.外部存储器等.内存是将外存与CPU连接起来的桥梁,计算机中所有数据都需经过内存进行交互,而且 ...
- linux为系统分配内存,Linux操作系统知识讲解:走进Linux 内存分配算法
Linux 内存分配算法 内存管理算法--对讨厌自己管理内存的人来说是天赐的礼物 1.内存碎片 1) 基本原理 产生原因:内存分配较小,并且分配的这些小的内存生存周期又较长,反复申请后将产生内存碎片的 ...
- 探索 Linux 内存模型--转
引用:http://www.ibm.com/developerworks/cn/linux/l-memmod/index.html 理解 Linux 使用的内存模型是从更大程度上掌握 Linux 设计 ...
- Linux内存管理初探
linux内存是后台开发人员,需要深入了解的计算机资源.合理的使用内存,有助于提升机器的性能和稳定性.本文主要介绍linux内存组织结构和页面布局,内存碎片产生原因和优化算法,linux内核几种内存管 ...
- Linux内存之Cache
一. Linux内存之Cache 1.1.Cache 1.1.1.什么是Cache? Cache存储器,是位于CPU和主存储器DRAM之间的一块高速缓冲存储器,规模较小,但是速度很快,通常由SRAM( ...
- Linux内存管理:ARM64体系结构与编程之cache(3):cache一致性协议(MESI、MOESI)、cache伪共享
目录 为什么系统软件人员要深入了解cache? cache一致性协议 神马是MESI协议? MESI的操作 MESI状态图 演示:初始化状态为I的cache line 当本地CPU的缓存行状态为I时, ...
- Linux内存管理:ARM64体系结构与编程之cache(2):cache一致性
<Linux内存管理:ARM64体系结构与编程之cache(1)> <Linux内存管理:ARM64体系结构与编程之cache(2)> <ARM SMMU原理与IOMMU ...
最新文章
- 小程序使用富文本完整代码及示例图
- linux除了eeprom其他的保存方法,linux的EEPROM的读写控制.doc
- 深入理解JMM(Java内存模型) --(六)final
- PHP快速入门 如何操作MySQL
- TFRecords转化和读取
- 两平面平行方向向量关系_立体几何平行证明的四大必杀绝技------赞!很赞!!非常赞!!!...
- 开源软件生态_基础,亮点和建立成功的开源生态系统
- codeforces hack
- 跑动大数据的笔记本配置_大数据说话|你比一般跑者跑得更快吗?
- 天然纤维复合材料行业调研报告 - 市场现状分析与发展前景预测
- MySQL的InnoDB表如何设计主键索引-转自淘宝MySQL经典案例
- python_生成器
- javascript跑马灯效果
- 电路板故障测试仪GR4080软件,电路板故障检测仪
- #pragma once用法
- python结果四舍五入保留两位小数_python中四舍五入的正确打开方式
- 推荐算法可以做到千人千面,但是千人千面的流量利用效率一定是优于人工分发吗?
- 比特鹏哥网课笔记(结构体,枚举,联合体,通讯录项目)
- 高德地图WEB端软件应用
- Nyoj 954 N!