// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/dma-direct.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/ptdump.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>
#include <asm/dma-mapping.h>
#include <asm/uv.h>
#include <linux/virtio_config.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
static pgd_t invalid_pg_dir[PTRS_PER_PGD] __section(".bss..invalid_pg_dir");

unsigned long s390_invalid_asce;

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

bool initmem_freed;

static void __init setup_zero_pages(void)
{
	unsigned int order;
	struct page *page;
	int i;

	/* Latest machines require a mapping granularity of 512KB */
	order = 7;

	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (totalram_pages() >> 10) < (1UL << order))
		order--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}

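	/*
	 * ZERO_PAGE(vaddr) uses this mask to pick one of the 1 << order
	 * zero pages based on the virtual address, spreading readers
	 * across cache colors.
	 */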
	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;
	psw_t psw;

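	/*
	 * The invalid ASCE points to a region table with nothing but
	 * invalid entries; it is installed as user_asce whenever no user
	 * address space is attached, so stray user-space accesses fault
	 * cleanly instead of hitting random memory.
	 */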
	s390_invalid_asce  = (unsigned long)invalid_pg_dir;
	s390_invalid_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);
	init_mm.pgd = swapper_pg_dir;
	if (VMALLOC_END > _REGION2_SIZE) {
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
	init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.kernel_asce = init_mm.context.asce;
	S390_lowcore.user_asce = s390_invalid_asce;
	crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
	vmem_map_init();
	kasan_copy_shadow_mapping();

	/* enable virtual mapping in kernel mode */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);	/* CR1: primary ASCE */
	__ctl_load(S390_lowcore.user_asce, 7, 7);	/* CR7: secondary ASCE */
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);	/* CR13: home ASCE */
	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);
	kasan_free_early_identity();

	sparse_init();
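	/* 31-bit capable I/O devices can only address the first 2 GB */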
	zone_dma_bits = 31;
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);
}

void mark_rodata_ro(void)
{
	unsigned long size = __end_ro_after_init - __start_ro_after_init;

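	/*
	 * Kernel text and rodata proper were already write-protected by
	 * vmem_map_init(); only the ro_after_init data is left to do here.
	 */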
	set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT);
	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
	debug_checkwx();
}

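/*
 * In a protected virtualization guest, memory is inaccessible to the
 * hypervisor by default; pages used for I/O must be explicitly shared
 * with the ultravisor first and unshared again when they are done.
 */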
int set_memory_encrypted(unsigned long addr, int numpages)
{
	int i;

	/* make specified pages unshared (swiotlb, dma_free) */
	for (i = 0; i < numpages; ++i) {
		uv_remove_shared(addr);
		addr += PAGE_SIZE;
	}
	return 0;
}

int set_memory_decrypted(unsigned long addr, int numpages)
{
	int i;
	/* make specified pages shared (swiotlb, dma_alloc) */
	for (i = 0; i < numpages; ++i) {
		uv_set_shared(addr);
		addr += PAGE_SIZE;
	}
	return 0;
}

/* are we a protected virtualization guest? */
bool force_dma_unencrypted(struct device *dev)
{
	return is_prot_virt_guest();
}

#ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS

int arch_has_restricted_virtio_memory_access(void)
{
	return is_prot_virt_guest();
}
EXPORT_SYMBOL(arch_has_restricted_virtio_memory_access);

#endif

/* protected virtualization */
static void pv_init(void)
{
	if (!is_prot_virt_guest())
		return;

	/* make sure bounce buffers are shared */
	swiotlb_init(1);
	swiotlb_update_mem_attributes();
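	/* force all streaming DMA mappings through the shared bounce buffers */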
	swiotlb_force = SWIOTLB_FORCE;
}

void __init mem_init(void)
{
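	/* the boot CPU is running on init_mm from the start */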
	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));

	set_max_mapnr(max_low_pfn);
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	pv_init();

	/* Set up guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	setup_zero_pages();	/* Set up zeroed pages. */

	cmma_init_nodat();

	mem_init_print_info(NULL);
}

void free_initmem(void)
{
	initmem_freed = true;
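	/* make the init text writable and non-executable before freeing it */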
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RW | SET_MEMORY_NX);
	free_initmem_default(POISON_FREE_INITMEM);
}

unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater than
	 * or equal to the memory increment size.
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_CMA

/* Prevent memory blocks that contain CMA regions from going offline */

struct s390_cma_mem_data {
	unsigned long start;
	unsigned long end;
};

static int s390_cma_check_range(struct cma *cma, void *data)
{
	struct s390_cma_mem_data *mem_data;
	unsigned long start, end;

	mem_data = data;
	start = cma_get_base(cma);
	end = start + cma_get_size(cma);
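	/* allow offlining only if the range does not overlap this CMA area */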
	if (end < mem_data->start)
		return 0;
	if (start >= mem_data->end)
		return 0;
	return -EBUSY;
}

static int s390_cma_mem_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct s390_cma_mem_data mem_data;
	struct memory_notify *arg;
	int rc = 0;

	arg = data;
	mem_data.start = arg->start_pfn << PAGE_SHIFT;
	mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT);
	if (action == MEM_GOING_OFFLINE)
		rc = cma_for_each_area(s390_cma_check_range, &mem_data);
	return notifier_from_errno(rc);
}

static struct notifier_block s390_cma_mem_nb = {
	.notifier_call = s390_cma_mem_notifier,
};

static int __init s390_cma_mem_init(void)
{
	return register_memory_notifier(&s390_cma_mem_nb);
}
device_initcall(s390_cma_mem_init);

#endif /* CONFIG_CMA */

int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	int rc;

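	/* struct pages backed by a vmem_altmap are not supported here */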
	if (WARN_ON_ONCE(params->altmap))
		return -EINVAL;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;

	rc = __add_pages(nid, start_pfn, size_pages, params);
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}

void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	vmem_remove_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */