// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>
#include <asm/dma-mapping.h>
#include <asm/uv.h>
L
Linus Torvalds 已提交
49

50
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
51

52
unsigned long empty_zero_page, zero_page_mask;
53
EXPORT_SYMBOL(empty_zero_page);
54
EXPORT_SYMBOL(zero_page_mask);
L
Linus Torvalds 已提交
55

56 57
bool initmem_freed;

58
static void __init setup_zero_pages(void)
59 60 61 62 63
{
	unsigned int order;
	struct page *page;
	int i;

64 65 66
	/* Latest machines require a mapping granularity of 512KB */
	order = 7;

67
	/* Limit number of empty zero pages for small memory sizes */
68
	while (order > 2 && (totalram_pages() >> 10) < (1UL << order))
69
		order--;
70 71 72 73 74 75 76 77

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
78
		mark_page_reserved(page);
79 80 81
		page++;
	}

82
	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
83 84
}

L
Linus Torvalds 已提交
85 86 87 88 89
/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
90
	unsigned long max_zone_pfns[MAX_NR_ZONES];
91
	unsigned long pgd_type, asce_bits;
92
	psw_t psw;
93

94
	init_mm.pgd = swapper_pg_dir;
95
	if (VMALLOC_END > _REGION2_SIZE) {
96 97 98 99 100 101
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
102 103
	init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.kernel_asce = init_mm.context.asce;
104
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;
105
	crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
H
Heiko Carstens 已提交
106
	vmem_map_init();
107
	kasan_copy_shadow(init_mm.pgd);
L
Linus Torvalds 已提交
108

109
	/* enable virtual mapping in kernel mode */
110 111 112
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
113
	psw.mask = __extract_psw();
114
	psw_bits(psw).dat = 1;
H
Heiko Carstens 已提交
115
	psw_bits(psw).as = PSW_BITS_AS_HOME;
116
	__load_psw_mask(psw.mask);
117
	kasan_free_early_identity();
L
Linus Torvalds 已提交
118

119 120
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
121 122 123 124
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
L
Linus Torvalds 已提交
125 126
}

H
Heiko Carstens 已提交
127 128
void mark_rodata_ro(void)
{
129 130 131 132
	unsigned long size = __end_ro_after_init - __start_ro_after_init;

	set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT);
	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
H
Heiko Carstens 已提交
133 134
}

135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175
int set_memory_encrypted(unsigned long addr, int numpages)
{
	int i;

	/* make specified pages unshared, (swiotlb, dma_free) */
	for (i = 0; i < numpages; ++i) {
		uv_remove_shared(addr);
		addr += PAGE_SIZE;
	}
	return 0;
}

int set_memory_decrypted(unsigned long addr, int numpages)
{
	int i;
	/* make specified pages shared (swiotlb, dma_alloca) */
	for (i = 0; i < numpages; ++i) {
		uv_set_shared(addr);
		addr += PAGE_SIZE;
	}
	return 0;
}

/* are we a protected virtualization guest? */
bool sev_active(void)
{
	return is_prot_virt_guest();
}

/* protected virtualization */
static void pv_init(void)
{
	if (!is_prot_virt_guest())
		return;

	/* make sure bounce buffers are shared */
	swiotlb_init(1);
	swiotlb_update_mem_attributes();
	swiotlb_force = SWIOTLB_FORCE;
}

L
Linus Torvalds 已提交
176 177
void __init mem_init(void)
{
178
	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
179 180
	cpumask_set_cpu(0, mm_cpumask(&init_mm));

181
	set_max_mapnr(max_low_pfn);
L
Linus Torvalds 已提交
182 183
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

184 185
	pv_init();

186 187 188
	/* Setup guest page hinting */
	cmma_init();

L
Linus Torvalds 已提交
189
	/* this will put all low memory onto the freelists */
190
	memblock_free_all();
191
	setup_zero_pages();	/* Setup zeroed pages. */
L
Linus Torvalds 已提交
192

193 194
	cmma_init_nodat();

195
	mem_init_print_info(NULL);
L
Linus Torvalds 已提交
196 197
}

198 199
void free_initmem(void)
{
200
	initmem_freed = true;
H
Heiko Carstens 已提交
201 202
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
203
		     SET_MEMORY_RW | SET_MEMORY_NX);
204
	free_initmem_default(POISON_FREE_INITMEM);
L
Linus Torvalds 已提交
205 206
}

207 208 209 210 211 212 213 214 215
unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater
	 * or equal than the memory increment size.
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_CMA

/* Prevent memory blocks which contain cma regions from going offline */

struct s390_cma_mem_data {
	unsigned long start;
	unsigned long end;
};

static int s390_cma_check_range(struct cma *cma, void *data)
{
	struct s390_cma_mem_data *mem_data;
	unsigned long start, end;

	mem_data = data;
	start = cma_get_base(cma);
	end = start + cma_get_size(cma);
	if (end < mem_data->start)
		return 0;
	if (start >= mem_data->end)
		return 0;
	return -EBUSY;
}

static int s390_cma_mem_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct s390_cma_mem_data mem_data;
	struct memory_notify *arg;
	int rc = 0;

	arg = data;
	mem_data.start = arg->start_pfn << PAGE_SHIFT;
	mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT);
	if (action == MEM_GOING_OFFLINE)
		rc = cma_for_each_area(s390_cma_check_range, &mem_data);
	return notifier_from_errno(rc);
}

static struct notifier_block s390_cma_mem_nb = {
	.notifier_call = s390_cma_mem_notifier,
};

static int __init s390_cma_mem_init(void)
{
	return register_memory_notifier(&s390_cma_mem_nb);
}
device_initcall(s390_cma_mem_init);

#endif /* CONFIG_CMA */

269 270
int arch_add_memory(int nid, u64 start, u64 size,
		struct mhp_restrictions *restrictions)
271
{
272 273
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
274
	int rc;
275 276 277 278

	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;
279

280
	rc = __add_pages(nid, start_pfn, size_pages, restrictions);
281 282 283 284
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}
285 286

#ifdef CONFIG_MEMORY_HOTREMOVE
287 288
void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
289 290 291 292 293 294
{
	/*
	 * There is no hardware or firmware interface which could trigger a
	 * hot memory remove on s390. So there is nothing that needs to be
	 * implemented.
	 */
295
	BUG();
296 297
}
#endif
298
#endif /* CONFIG_MEMORY_HOTPLUG */