// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/dma-direct.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/ptdump.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>
#include <asm/dma-mapping.h>
#include <asm/uv.h>
#include <linux/virtio_config.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

bool initmem_freed;

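/*
 * Allocate a contiguous block of zeroed pages, mark them reserved, and
 * set zero_page_mask so the empty zero page lookup can index anywhere
 * within that block.
 */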
static void __init setup_zero_pages(void)
{
	unsigned int order;
	struct page *page;
	int i;

	/* Latest machines require a mapping granularity of 512KB */
	order = 7;

	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (totalram_pages() >> 10) < (1UL << order))
		order--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;
	psw_t psw;

	init_mm.pgd = swapper_pg_dir;
	if (VMALLOC_END > _REGION2_SIZE) {
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
	init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.kernel_asce = init_mm.context.asce;
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;
	crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
	vmem_map_init();
	kasan_copy_shadow_mapping();

	/* enable virtual mapping in kernel mode */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);
	kasan_free_early_identity();

	sparse_init();
	zone_dma_bits = 31;
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);
}

void mark_rodata_ro(void)
{
	unsigned long size = __end_ro_after_init - __start_ro_after_init;

	set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT);
	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
	debug_checkwx();
}

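/*
 * In a protected virtualization guest, pages the hypervisor must be able
 * to access (e.g. swiotlb bounce buffers) are explicitly shared via
 * uv_set_shared() and unshared again via uv_remove_shared(); these calls
 * back set_memory_decrypted() and set_memory_encrypted() respectively.
 */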
int set_memory_encrypted(unsigned long addr, int numpages)
{
	int i;

	/* make specified pages unshared (swiotlb, dma_free) */
	for (i = 0; i < numpages; ++i) {
		uv_remove_shared(addr);
		addr += PAGE_SIZE;
	}
	return 0;
}

int set_memory_decrypted(unsigned long addr, int numpages)
{
	int i;
	/* make specified pages shared (swiotlb, dma_alloc) */
	for (i = 0; i < numpages; ++i) {
		uv_set_shared(addr);
		addr += PAGE_SIZE;
	}
	return 0;
}

/* are we a protected virtualization guest? */
bool force_dma_unencrypted(struct device *dev)
{
	return is_prot_virt_guest();
}

#ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS

int arch_has_restricted_virtio_memory_access(void)
{
	return is_prot_virt_guest();
}
EXPORT_SYMBOL(arch_has_restricted_virtio_memory_access);

#endif

/*
 * Protected virtualization: the hypervisor cannot access unshared guest
 * memory, so force all DMA through (shared) swiotlb bounce buffers.
 */
static void pv_init(void)
{
	if (!is_prot_virt_guest())
		return;

	/* make sure bounce buffers are shared */
	swiotlb_init(1);
	swiotlb_update_mem_attributes();
	swiotlb_force = SWIOTLB_FORCE;
}

void __init mem_init(void)
{
	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));

	set_max_mapnr(max_low_pfn);
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	pv_init();

	/* Setup guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	setup_zero_pages();	/* Setup zeroed pages. */

	cmma_init_nodat();

	mem_init_print_info(NULL);
}

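/*
 * Make the init text writable and non-executable again, then poison and
 * free the init sections.
 */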
void free_initmem(void)
{
	initmem_freed = true;
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RW | SET_MEMORY_NX);
	free_initmem_default(POISON_FREE_INITMEM);
}

unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater
	 * or equal than the memory increment size.
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_CMA

/* Prevent memory blocks which contain cma regions from going offline */

struct s390_cma_mem_data {
	unsigned long start;
	unsigned long end;
};

static int s390_cma_check_range(struct cma *cma, void *data)
{
	struct s390_cma_mem_data *mem_data;
	unsigned long start, end;

	mem_data = data;
	start = cma_get_base(cma);
	end = start + cma_get_size(cma);
	if (end < mem_data->start)
		return 0;
	if (start >= mem_data->end)
		return 0;
	return -EBUSY;
}

static int s390_cma_mem_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct s390_cma_mem_data mem_data;
	struct memory_notify *arg;
	int rc = 0;

	arg = data;
	mem_data.start = arg->start_pfn << PAGE_SHIFT;
	mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT);
	if (action == MEM_GOING_OFFLINE)
		rc = cma_for_each_area(s390_cma_check_range, &mem_data);
	return notifier_from_errno(rc);
}

static struct notifier_block s390_cma_mem_nb = {
	.notifier_call = s390_cma_mem_notifier,
};

static int __init s390_cma_mem_init(void)
{
	return register_memory_notifier(&s390_cma_mem_nb);
}
device_initcall(s390_cma_mem_init);

#endif /* CONFIG_CMA */

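/*
 * Memory hotplug: create the kernel mapping for the new range with
 * vmem_add_mapping() before adding the struct pages with __add_pages();
 * tear the mapping down again if adding the pages fails.
 */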
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	int rc;

	if (WARN_ON_ONCE(params->altmap))
		return -EINVAL;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;

	rc = __add_pages(nid, start_pfn, size_pages, params);
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}

void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	vmem_remove_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */