// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>

/* Kernel top-level page table, placed in .bss via a dedicated section. */
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);

/*
 * Base address of the block of empty zero pages and the mask used to
 * pick one of them; both are set up in setup_zero_pages().
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

/*
 * Allocate and reserve the block of empty zero pages and derive
 * zero_page_mask from its size.
 */
static void __init setup_zero_pages(void)
{
	struct page *pg;
	unsigned int ord;
	int n;

	/* Latest machines require a mapping granularity of 512KB */
	ord = 7;

	/* Limit number of empty zero pages for small memory sizes */
	while (ord > 2 && (totalram_pages >> 10) < (1UL << ord))
		ord--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, ord);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	/* Split the higher-order allocation and pin each page. */
	pg = virt_to_page((void *) empty_zero_page);
	split_page(pg, ord);
	for (n = 0; n < (1 << ord); n++)
		mark_page_reserved(pg + n);

	zero_page_mask = ((PAGE_SIZE << ord) - 1) & PAGE_MASK;
}

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;
	psw_t psw;

	init_mm.pgd = swapper_pg_dir;
	/* Pick region-second vs. region-third table format based on VMALLOC_END. */
	if (VMALLOC_END > _REGION2_SIZE) {
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
	init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.kernel_asce = init_mm.context.asce;
	/* Kernel and user share the same ASCE at this point of boot. */
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;
	crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
	vmem_map_init();
	kasan_copy_shadow(init_mm.pgd);

	/* enable virtual mapping in kernel mode */
	/* Load the kernel ASCE into control registers 1, 7 and 13. */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	/* Turn on DAT and switch to home address space in the current PSW. */
	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);

	/* Register memory and set up the zone layout (DMA + NORMAL only). */
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}

void mark_rodata_ro(void)
{
123 124 125 126
	unsigned long size = __end_ro_after_init - __start_ro_after_init;

	set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT);
	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
H
Heiko Carstens 已提交
127 128
}

/*
 * Finish memory initialization: hand boot memory to the page allocator
 * and set up the empty zero pages. Statement order matters here.
 */
void __init mem_init(void)
{
	/* Boot CPU 0 is attached to init_mm before any other CPU comes up. */
	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));

	set_max_mapnr(max_low_pfn);
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	/* Setup guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	free_all_bootmem();
	setup_zero_pages();	/* Setup zeroed pages. */

	/* NOTE(review): runs after free_all_bootmem() — presumably needs the
	 * page allocator to be live; confirm ordering before moving. */
	cmma_init_nodat();

	mem_init_print_info(NULL);
}

void free_initmem(void)
{
H
Heiko Carstens 已提交
151 152
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
153
		     SET_MEMORY_RW | SET_MEMORY_NX);
154
	free_initmem_default(POISON_FREE_INITMEM);
L
Linus Torvalds 已提交
155 156 157
}

#ifdef CONFIG_BLK_DEV_INITRD
/* Return the initrd image range [start, end) to the page allocator. */
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	void *from = (void *)start;
	void *to = (void *)end;

	free_reserved_area(from, to, POISON_FREE_INITMEM, "initrd");
}
#endif

unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater
	 * or equal than the memory increment size.
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}

174
#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_CMA

/* Prevent memory blocks which contain cma regions from going offline */

/* Physical address range [start, end) of the block going offline. */
struct s390_cma_mem_data {
	unsigned long start;
	unsigned long end;
};

/*
 * cma_for_each_area() callback: -EBUSY when this CMA area intersects
 * the range described by @data, 0 otherwise.
 */
static int s390_cma_check_range(struct cma *cma, void *data)
{
	struct s390_cma_mem_data *range = data;
	unsigned long base, top;

	base = cma_get_base(cma);
	top = base + cma_get_size(cma);
	if (top < range->start || base >= range->end)
		return 0;
	return -EBUSY;
}

static int s390_cma_mem_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct s390_cma_mem_data mem_data;
	struct memory_notify *arg;
	int rc = 0;

	arg = data;
	mem_data.start = arg->start_pfn << PAGE_SHIFT;
	mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT);
	if (action == MEM_GOING_OFFLINE)
		rc = cma_for_each_area(s390_cma_check_range, &mem_data);
	return notifier_from_errno(rc);
}

static struct notifier_block s390_cma_mem_nb = {
	.notifier_call = s390_cma_mem_notifier,
};

/* Register the CMA offline-protection notifier at device initcall time. */
static int __init s390_cma_mem_init(void)
{
	return register_memory_notifier(&s390_cma_mem_nb);
}
device_initcall(s390_cma_mem_init);

#endif /* CONFIG_CMA */

/*
 * Memory hotplug entry point: create the identity mapping for the new
 * range, then add its pages to the memory management subsystem.
 * The mapping is rolled back if __add_pages() fails.
 */
int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		bool want_memblock)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = PFN_DOWN(size);
	int ret;

	ret = vmem_add_mapping(start, size);
	if (ret)
		return ret;

	ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
	if (ret)
		vmem_remove_mapping(start, size);
	return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
245
int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
246 247 248 249 250 251 252 253 254
{
	/*
	 * There is no hardware or firmware interface which could trigger a
	 * hot memory remove on s390. So there is nothing that needs to be
	 * implemented.
	 */
	return -EBUSY;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */