// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/dma-direct.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>
#include <asm/dma-mapping.h>
#include <asm/uv.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

bool initmem_freed;

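/*
 * Set up a block of empty zero pages. The block is split into single
 * pages and marked reserved; zero_page_mask lets ZERO_PAGE() spread
 * read-only zero mappings over the whole block.
 */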
static void __init setup_zero_pages(void)
{
	unsigned int order;
	struct page *page;
	int i;

	/* Latest machines require a mapping granularity of 512KB */
	order = 7;

	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (totalram_pages() >> 10) < (1UL << order))
		order--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;
	psw_t psw;

	init_mm.pgd = swapper_pg_dir;
	if (VMALLOC_END > _REGION2_SIZE) {
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
	init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.kernel_asce = init_mm.context.asce;
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;
	crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
	vmem_map_init();
	kasan_copy_shadow(init_mm.pgd);

	/* enable virtual mapping in kernel mode */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);
	kasan_free_early_identity();

	sparse_init();
	zone_dma_bits = 31;
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);
}

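/*
 * Write-protect the data that only needed to be writable during boot
 * (the __ro_after_init section).
 */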
void mark_rodata_ro(void)
{
	unsigned long size = __end_ro_after_init - __start_ro_after_init;

	set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT);
	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
}

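/*
 * Protected virtualization guests have no real memory encryption;
 * "encrypted" here means not shared with the hypervisor. These helpers
 * are called by the generic DMA and swiotlb code.
 */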
int set_memory_encrypted(unsigned long addr, int numpages)
{
	int i;

	/* make specified pages unshared (swiotlb, dma_free) */
	for (i = 0; i < numpages; ++i) {
		uv_remove_shared(addr);
		addr += PAGE_SIZE;
	}
	return 0;
}

int set_memory_decrypted(unsigned long addr, int numpages)
{
	int i;

	/* make specified pages shared (swiotlb, dma_alloc) */
	for (i = 0; i < numpages; ++i) {
		uv_set_shared(addr);
		addr += PAGE_SIZE;
	}
	return 0;
}

/* are we a protected virtualization guest? */
bool force_dma_unencrypted(struct device *dev)
{
	return is_prot_virt_guest();
}

/* protected virtualization */
static void pv_init(void)
{
	if (!is_prot_virt_guest())
		return;

	/* make sure bounce buffers are shared */
	swiotlb_init(1);
	swiotlb_update_mem_attributes();
	swiotlb_force = SWIOTLB_FORCE;
}

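/*
 * Release all usable memory to the page allocator and finish the
 * architecture specific memory setup (bounce buffers, CMMA, zero pages).
 */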
void __init mem_init(void)
{
	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));

	set_max_mapnr(max_low_pfn);
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	pv_init();

	/* Setup guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	setup_zero_pages();	/* Setup zeroed pages. */

	cmma_init_nodat();

	mem_init_print_info(NULL);
}

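/*
 * Make the init text writable and non-executable again, then hand the
 * init sections back to the page allocator.
 */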
void free_initmem(void)
{
	initmem_freed = true;
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RW | SET_MEMORY_NX);
	free_initmem_default(POISON_FREE_INITMEM);
}

unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater than or
	 * equal to the memory increment size.
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_CMA

/* Prevent memory blocks which contain cma regions from going offline */

struct s390_cma_mem_data {
	unsigned long start;
	unsigned long end;
};

static int s390_cma_check_range(struct cma *cma, void *data)
{
	struct s390_cma_mem_data *mem_data;
	unsigned long start, end;

	mem_data = data;
	start = cma_get_base(cma);
	end = start + cma_get_size(cma);
	if (end < mem_data->start)
		return 0;
	if (start >= mem_data->end)
		return 0;
	return -EBUSY;
}

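/*
 * Memory notifier: refuse to offline a memory block that overlaps any
 * CMA area.
 */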
static int s390_cma_mem_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct s390_cma_mem_data mem_data;
	struct memory_notify *arg;
	int rc = 0;

	arg = data;
	mem_data.start = arg->start_pfn << PAGE_SHIFT;
	mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT);
	if (action == MEM_GOING_OFFLINE)
		rc = cma_for_each_area(s390_cma_check_range, &mem_data);
	return notifier_from_errno(rc);
}

static struct notifier_block s390_cma_mem_nb = {
	.notifier_call = s390_cma_mem_notifier,
};

static int __init s390_cma_mem_init(void)
{
	return register_memory_notifier(&s390_cma_mem_nb);
}
device_initcall(s390_cma_mem_init);

#endif /* CONFIG_CMA */

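/*
 * Add the new range to the kernel mapping first, then create its struct
 * pages; tear the mapping down again if that fails.
 */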
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	int rc;

	if (WARN_ON_ONCE(params->altmap))
		return -EINVAL;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;

	rc = __add_pages(nid, start_pfn, size_pages, params);
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}

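/*
 * Remove the struct pages first, then tear down the kernel mapping of
 * the range.
 */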
void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	vmem_remove_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */