# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
    Fast implementation for graph construction and sampling.
"""
import numpy as np
cimport numpy as np
cimport cython
from libcpp.map cimport map
from libcpp.set cimport set
from libcpp.unordered_set cimport unordered_set
from libcpp.unordered_map cimport unordered_map
from libcpp.vector cimport vector
from libc.stdlib cimport rand, RAND_MAX

@cython.boundscheck(False)
@cython.wraparound(False)
def build_index(np.ndarray[np.int64_t, ndim=1] u,
        np.ndarray[np.int64_t, ndim=1] v,
        long long num_nodes):
    """Building Edge Index
    """
    cdef long long i
    cdef long long h=len(u)
    cdef long long n_size = num_nodes
    cdef np.ndarray[np.int64_t, ndim=1] degree = np.zeros([n_size], dtype=np.int64)
    cdef np.ndarray[np.int64_t, ndim=1] count = np.zeros([n_size], dtype=np.int64)
    cdef np.ndarray[np.int64_t, ndim=1] _tmp_v = np.zeros([h], dtype=np.int64)
    cdef np.ndarray[np.int64_t, ndim=1] _tmp_u = np.zeros([h], dtype=np.int64)
    cdef np.ndarray[np.int64_t, ndim=1] _tmp_eid = np.zeros([h], dtype=np.int64)
    cdef np.ndarray[np.int64_t, ndim=1] indptr = np.zeros([n_size + 1], dtype=np.int64)
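    # Counting-sort construction of the CSR-style index: the first pass counts
    # out-degrees, the prefix sum fills indptr, and the second pass scatters
    # each edge's target node and edge id into its source node's slot.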

    with nogil:
        for i in xrange(h):
            degree[u[i]] += 1

        for i in xrange(n_size):
            indptr[i + 1] = indptr[i] + degree[i]

        for i in xrange(h):
            _tmp_v[indptr[u[i]] + count[u[i]]] = v[i]
            _tmp_eid[indptr[u[i]] + count[u[i]]] = i
            _tmp_u[indptr[u[i]] + count[u[i]]] = u[i]
            count[u[i]] += 1

    cdef list output_eid = []
    cdef list output_v = []
    for i in xrange(n_size):
        output_eid.append(_tmp_eid[indptr[i]:indptr[i+1]])
        output_v.append(_tmp_v[indptr[i]:indptr[i+1]])
    return np.array(output_v), np.array(output_eid), degree, _tmp_u, _tmp_v, _tmp_eid


@cython.boundscheck(False)
@cython.wraparound(False)
def map_edges(np.ndarray[np.int64_t, ndim=1] eid,
        np.ndarray[np.int64_t, ndim=2] edges,
        reindex):
    """Mapping edges by given dictionary
    """
    cdef unordered_map[long long, long long] m = reindex
    cdef long long i = 0
    cdef long long h = len(eid)
    cdef np.ndarray[np.int64_t, ndim=2] r_edges = np.zeros([h, 2], dtype=np.int64)
    cdef long long j
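    # Look up both endpoints of every selected edge in the C++ unordered_map;
    # the loop runs without the GIL.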
    with nogil:
        for i in xrange(h):
            j = eid[i]
            r_edges[i, 0] = m[edges[j, 0]]
            r_edges[i, 1] = m[edges[j, 1]]
    return r_edges

@cython.boundscheck(False)
@cython.wraparound(False)
def map_nodes(nodes, reindex):
    """Mapping nodes by given dictionary
    """
    cdef np.ndarray[np.int64_t, ndim=1] t_nodes = np.array(nodes, dtype=np.int64)
    cdef unordered_map[long long, long long] m = reindex
    cdef long long i = 0
    cdef long long h = len(nodes)
    cdef np.ndarray[np.int64_t, ndim=1] new_nodes = np.zeros([h], dtype=np.int64)
    cdef long long j
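    # Same pattern as map_edges: translate each node id through the C++ map
    # inside a nogil loop.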
    with nogil:
        for i in xrange(h):
            j = t_nodes[i]
            new_nodes[i] = m[j]
    return new_nodes

@cython.boundscheck(False)
@cython.wraparound(False)
def node2vec_sample(np.ndarray[np.int64_t, ndim=1] succ,
        np.ndarray[np.int64_t, ndim=1] prev_succ, long long prev_node,
        float p, float q):
    """Fast implement of node2vec sampling
    """
    cdef long long i
    cdef long long succ_len = len(succ)
    cdef long long prev_succ_len = len(prev_succ)

    cdef vector[float] probs
    cdef float prob_sum = 0

    cdef unordered_set[long long] prev_succ_set
    for i in xrange(prev_succ_len):
        prev_succ_set.insert(prev_succ[i])

    cdef float prob
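    # Second-order transition weights from node2vec: returning to the previous
    # node is weighted 1/p, moving to a neighbor shared with the previous node
    # is weighted 1, and every other move is weighted 1/q.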
    for i in xrange(succ_len):
        if succ[i] == prev_node:
            prob = 1. / p
        elif prev_succ_set.find(succ[i]) != prev_succ_set.end():
            prob = 1.
        else:
            prob = 1. / q
        probs.push_back(prob)
        prob_sum += prob

    cdef float rand_num = float(rand())/RAND_MAX * prob_sum

    cdef long long sample_succ = 0
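    # Roulette-wheel selection: subtract each unnormalized weight from the
    # random threshold and return the successor at which it drops to zero.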
    for i in xrange(succ_len):
        rand_num -= probs[i]
        if rand_num <= 0:
            sample_succ = succ[i]
            return sample_succ
    # Floating-point rounding can leave rand_num marginally positive after the
    # loop; fall back to the last successor so a node id is always returned.
    if succ_len > 0:
        sample_succ = succ[succ_len - 1]
    return sample_succ

@cython.boundscheck(False)
@cython.wraparound(False)
def subset_choose_index(long long s_size,
                            np.ndarray[ndim=1, dtype=np.int64_t] nid,
                            np.ndarray[ndim=1, dtype=np.int64_t] rnd,
                            np.ndarray[ndim=1, dtype=np.int64_t] buff_nid,
                           long long offset):
    cdef long long n_size = len(nid)
    cdef long long i
    cdef long long j
    cdef unordered_map[long long, long long] m
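    # Swap-based sampling without replacement that never mutates nid: the map m
    # records which original index currently occupies a displaced position, so
    # s_size distinct entries are written into buff_nid starting at offset.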
    with nogil:
        for i in xrange(s_size):
            j = rnd[offset + i] % n_size
            if j >= i:
                buff_nid[offset + i] = nid[j] if m.find(j) == m.end() else nid[m[j]]
                m[j] = i if m.find(i) == m.end() else m[i]
            else:
                buff_nid[offset + i] = buff_nid[offset + j]
                buff_nid[offset + j] = nid[i] if m.find(i) == m.end() else nid[m[i]]


@cython.boundscheck(False)
@cython.wraparound(False)
def subset_choose_index_eid(long long s_size,
                            np.ndarray[ndim=1, dtype=np.int64_t] nid,
                            np.ndarray[ndim=1, dtype=np.int64_t] eid,
                            np.ndarray[ndim=1, dtype=np.int64_t] rnd,
                            np.ndarray[ndim=1, dtype=np.int64_t] buff_nid,
                            np.ndarray[ndim=1, dtype=np.int64_t] buff_eid,
                           long long offset):
    cdef long long n_size = len(nid)
    cdef long long i
    cdef long long j
    cdef unordered_map[long long, long long] m
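    # Same swap-based sampling as subset_choose_index, but node ids and their
    # edge ids are moved together so the pairing is preserved.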
    with nogil:
        for i in xrange(s_size):
            j = rnd[offset + i] % n_size
            if j >= i:
                if m.find(j) == m.end():
                    buff_nid[offset + i], buff_eid[offset + i] = nid[j], eid[j]
                else:
                    buff_nid[offset + i], buff_eid[offset + i] = nid[m[j]], eid[m[j]]
                m[j] = i if m.find(i) == m.end() else m[i]
            else:
                buff_nid[offset + i], buff_eid[offset + i] = buff_nid[offset + j], buff_eid[offset + j]
                if m.find(i) == m.end():
                    buff_nid[offset + j], buff_eid[offset + j] = nid[i], eid[i]
                else:
                    buff_nid[offset + j], buff_eid[offset + j] = nid[m[i]], eid[m[i]]

@cython.boundscheck(False)
@cython.wraparound(False)
def sample_subset(list nids, long long maxdegree, shuffle=False):
    cdef np.ndarray[ndim=1, dtype=np.int64_t] buff_index
    cdef long long buff_size, sample_size
    cdef long long total_buff_size = 0
    cdef long long inc = 0
    cdef list output = []
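    # First pass sizes one shared buffer covering every neighbor list that
    # actually needs sampling (or shuffling); untouched lists are returned
    # as-is in the second pass.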
    for inc in xrange(len(nids)):
        buff_size = len(nids[inc])
        if buff_size > maxdegree:
            total_buff_size += maxdegree
        elif shuffle:
            total_buff_size += buff_size
    cdef np.ndarray[ndim=1, dtype=np.int64_t] buff_nid = np.zeros([total_buff_size], dtype=np.int64)
    cdef np.ndarray[np.int64_t, ndim=1] rnd = np.random.randint(0,  np.iinfo(np.int64).max,
                                                              dtype=np.int64, size=total_buff_size)

    cdef long long offset = 0
    for inc in xrange(len(nids)):
        buff_size = len(nids[inc])
        if not shuffle and buff_size <= maxdegree:
            output.append(nids[inc])
        else:
            sample_size = buff_size if buff_size <= maxdegree else maxdegree
            subset_choose_index(sample_size, nids[inc], rnd, buff_nid, offset)
            output.append(buff_nid[offset:offset+sample_size])
            offset += sample_size
    return output

@cython.boundscheck(False)
@cython.wraparound(False)
def sample_subset_with_eid(list nids, list eids, long long maxdegree, shuffle=False):
    cdef np.ndarray[ndim=1, dtype=np.int64_t] buff_index
    cdef long long buff_size, sample_size
    cdef long long total_buff_size = 0
    cdef long long inc = 0
    cdef list output = []
    cdef list output_eid = []
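    # Mirrors sample_subset, additionally carrying edge ids through the
    # sampling so output and output_eid stay aligned.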
    for inc in xrange(len(nids)):
        buff_size = len(nids[inc])
        if buff_size > maxdegree:
            total_buff_size += maxdegree
        elif shuffle:
            total_buff_size += buff_size
    cdef np.ndarray[ndim=1, dtype=np.int64_t] buff_nid = np.zeros([total_buff_size], dtype=np.int64)
    cdef np.ndarray[ndim=1, dtype=np.int64_t] buff_eid = np.zeros([total_buff_size], dtype=np.int64)
    cdef np.ndarray[np.int64_t, ndim=1] rnd = np.random.randint(0,  np.iinfo(np.int64).max,
                                                              dtype=np.int64, size=total_buff_size)

    cdef long long offset = 0
    for inc in xrange(len(nids)):
        buff_size = len(nids[inc])
        if not shuffle and buff_size <= maxdegree:
            output.append(nids[inc])
            output_eid.append(eids[inc])
        else:
            sample_size = buff_size if buff_size <= maxdegree else maxdegree
            subset_choose_index_eid(sample_size, nids[inc], eids[inc], rnd, buff_nid, buff_eid, offset)
            output.append(buff_nid[offset:offset+sample_size])
            output_eid.append(buff_eid[offset:offset+sample_size])
            offset += sample_size
    return output, output_eid

@cython.boundscheck(False)
@cython.wraparound(False)
def skip_gram_gen_pair(vector[long long] walk, long win_size=5):
    cdef vector[long long] src
    cdef vector[long long] dst
    cdef long long l = len(walk)
    cdef long long real_win_size, left, right, i, j
    cdef np.ndarray[np.int64_t, ndim=1] rnd = np.random.randint(1,  win_size+1,
                                    dtype=np.int64, size=l)
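    # For every walk position, draw a random window size in [1, win_size] and
    # emit (center, context) pairs for all distinct neighbors in that window.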
    with nogil:
        for i in xrange(l):
            real_win_size = rnd[i]
            left = i - real_win_size
            if left < 0:
                left = 0
            right = i + real_win_size
            if right >= l:
                right = l - 1
            for j in xrange(left, right+1):
                if walk[i] == walk[j]:
                    continue
                src.push_back(walk[i])
                dst.push_back(walk[j])
    return src, dst

@cython.boundscheck(False)
@cython.wraparound(False)
def alias_sample_build_table(np.ndarray[np.float64_t, ndim=1] probs):
    cdef long long l = len(probs)
    cdef np.ndarray[np.float64_t, ndim=1] alias = probs * l
    cdef np.ndarray[np.int64_t, ndim=1] events = np.zeros(l, dtype=np.int64)

    cdef vector[long long] larger_num, smaller_num
    cdef long long i, s_i, l_i
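    # Vose-style alias table construction: split the scaled probabilities into
    # "smaller" and "larger" buckets, then repeatedly pair one of each so every
    # column of the table carries total mass 1.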
    with nogil:
        for i in xrange(l):
            if alias[i] > 1:
                larger_num.push_back(i)
            elif alias[i] < 1:
                smaller_num.push_back(i)

        while smaller_num.size() > 0 and larger_num.size() > 0:
            s_i = smaller_num.back()
            l_i = larger_num.back()
            smaller_num.pop_back()
            events[s_i] = l_i
            alias[l_i] -= (1 - alias[s_i])
            if alias[l_i] <= 1:
                larger_num.pop_back()
            if alias[l_i] < 1:
                smaller_num.push_back(l_i)
    return alias, events
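

# A minimal usage sketch (illustrative only, assuming this file is compiled as
# a module named graph_kernel; `edges` and `num_nodes` are placeholder inputs,
# not part of this file):
#
#   import numpy as np
#   import graph_kernel
#
#   u = edges[:, 0].astype(np.int64)
#   v = edges[:, 1].astype(np.int64)
#   adj_v, adj_eid, degree, _, _, _ = graph_kernel.build_index(u, v, num_nodes)
#   sampled = graph_kernel.sample_subset(list(adj_v), 10)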