// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/dma-direct.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/kfence.h>
#include <asm/ptdump.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>
#include <asm/dma-mapping.h>
#include <asm/uv.h>
#include <linux/virtio_config.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
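/*
 * invalid_pg_dir contains no valid entries; s390_invalid_asce points at it
 * and is installed as the user ASCE while no user mm is attached (see
 * paging_init() below), so that any user-space access faults.
 */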
static pgd_t invalid_pg_dir[PTRS_PER_PGD] __section(".bss..invalid_pg_dir");

unsigned long s390_invalid_asce;

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

bool initmem_freed;

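/*
 * Set up a power-of-two block of empty zero pages.  ZERO_PAGE() uses
 * zero_page_mask to pick one of them based on the faulting address,
 * which helps avoid cache hot spots when many read-only mappings hit
 * the shared zero page.
 */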
static void __init setup_zero_pages(void)
{
	unsigned int order;
	struct page *page;
	int i;

	/* Latest machines require a mapping granularity of 512KB */
	order = 7;

	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (totalram_pages() >> 10) < (1UL << order))
		order--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;
	psw_t psw;

	s390_invalid_asce  = (unsigned long)invalid_pg_dir;
	s390_invalid_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);
	init_mm.pgd = swapper_pg_dir;
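	/*
	 * Use a region-second table as top-level kernel page table (four
	 * translation levels, up to 8 PB) if the kernel address space does
	 * not fit below the 4 TB a region-third table can map.
	 */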
	if (VMALLOC_END > _REGION2_SIZE) {
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
	init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.kernel_asce = init_mm.context.asce;
	S390_lowcore.user_asce = s390_invalid_asce;
	crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
	vmem_map_init();
	kasan_copy_shadow_mapping();

	/*
	 * Enable virtual mapping in kernel mode: control register 1 holds
	 * the primary ASCE, CR7 the secondary ASCE and CR13 the home space
	 * ASCE.  The kernel itself runs in the home address space.
	 */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.user_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);
	kasan_free_early_identity();

	sparse_init();
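	/* on s390 ZONE_DMA covers the first 2 GB (31-bit addressable) */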
	zone_dma_bits = 31;
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);
}

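/*
 * Called once late in boot to write-protect the data that had to stay
 * writable during init but must be read-only afterwards (__ro_after_init).
 */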
void mark_rodata_ro(void)
{
	unsigned long size = __end_ro_after_init - __start_ro_after_init;

	set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT);
	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
	debug_checkwx();
}

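/*
 * With protected virtualization (IBM Secure Execution), guest memory is
 * inaccessible to the host by default.  Pages used for I/O, such as the
 * swiotlb bounce buffers, must be explicitly shared with the ultravisor:
 * "encrypted" maps to unshared and "decrypted" to shared here.
 */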
int set_memory_encrypted(unsigned long addr, int numpages)
{
	int i;

	/* make specified pages unshared (swiotlb, dma_free) */
	for (i = 0; i < numpages; ++i) {
		uv_remove_shared(addr);
		addr += PAGE_SIZE;
	}
	return 0;
}

int set_memory_decrypted(unsigned long addr, int numpages)
{
	int i;

	/* make specified pages shared (swiotlb, dma_alloc) */
	for (i = 0; i < numpages; ++i) {
		uv_set_shared(addr);
		addr += PAGE_SIZE;
	}
	return 0;
}

/* are we a protected virtualization guest? */
bool force_dma_unencrypted(struct device *dev)
{
	return is_prot_virt_guest();
}

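/*
 * Tell the virtio core that device access to guest memory is restricted
 * for protected virtualization guests, which forces virtio through the
 * DMA API and thus through the shared bounce buffers set up in pv_init().
 */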
#ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS

int arch_has_restricted_virtio_memory_access(void)
{
	return is_prot_virt_guest();
}
EXPORT_SYMBOL(arch_has_restricted_virtio_memory_access);

#endif

/* protected virtualization */
static void pv_init(void)
{
	if (!is_prot_virt_guest())
		return;

	/* make sure bounce buffers are shared */
	swiotlb_force = SWIOTLB_FORCE;
	swiotlb_init(1);
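	/* swiotlb_update_mem_attributes() marks the bounce buffer pool shared */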
	swiotlb_update_mem_attributes();
}

void __init mem_init(void)
{
	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));

	set_max_mapnr(max_low_pfn);
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	pv_init();
	kfence_split_mapping();
	/* Setup guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	setup_zero_pages();	/* Setup zeroed pages. */

	cmma_init_nodat();
}

void free_initmem(void)
{
	initmem_freed = true;
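	/* make the init text writable and non-executable before freeing it */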
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RW | SET_MEMORY_NX);
	free_initmem_default(POISON_FREE_INITMEM);
}

unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater than or
	 * equal to the memory increment size (sclp.rzm).
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_CMA

/* Prevent memory blocks which contain cma regions from going offline */

struct s390_cma_mem_data {
	unsigned long start;
	unsigned long end;
};

static int s390_cma_check_range(struct cma *cma, void *data)
{
	struct s390_cma_mem_data *mem_data;
	unsigned long start, end;

	mem_data = data;
	start = cma_get_base(cma);
	end = start + cma_get_size(cma);
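	/* report -EBUSY if the CMA area overlaps the range going offline */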
	if (end < mem_data->start)
		return 0;
	if (start >= mem_data->end)
		return 0;
	return -EBUSY;
}

static int s390_cma_mem_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct s390_cma_mem_data mem_data;
	struct memory_notify *arg;
	int rc = 0;

	arg = data;
	mem_data.start = arg->start_pfn << PAGE_SHIFT;
	mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT);
	if (action == MEM_GOING_OFFLINE)
		rc = cma_for_each_area(s390_cma_check_range, &mem_data);
	return notifier_from_errno(rc);
}

static struct notifier_block s390_cma_mem_nb = {
	.notifier_call = s390_cma_mem_notifier,
};

static int __init s390_cma_mem_init(void)
{
	return register_memory_notifier(&s390_cma_mem_nb);
}
device_initcall(s390_cma_mem_init);

#endif /* CONFIG_CMA */

int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	int rc;

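	/* neither an altmap nor a non-default pgprot is supported here */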
	if (WARN_ON_ONCE(params->altmap))
		return -EINVAL;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	VM_BUG_ON(!mhp_range_allowed(start, size, true));
	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;

	rc = __add_pages(nid, start_pfn, size_pages, params);
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}

void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	vmem_remove_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */