Setting Up and Testing Oracle-to-Kafka Data Synchronization with Debezium

Environment
OS version
[root@localhost kafka_2.13-2.8.0]# cat /etc/redhat-release
CentOS Linux release 7.5.1804 (Core)
[root@localhost kafka_2.13-2.8.0]# uname -r
3.10.0-862.el7.x86_64
glibc version
[root@localhost kafka_2.13-2.8.0]# rpm -qa|grep glibc
glibc-common-2.17-222.el7.x86_64
glibc-2.17-222.el7.x86_64
Kafka version
kafka_2.13-2.8.0
ZooKeeper setup
Configuration on server 10.0.2.18
[root@localhost kafka_2.13-2.8.0]# cat /opt/kafka_2.13-2.8.0/config/zookeeper.properties
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# the directory where the snapshot is stored.
dataDir=/tmp/zookeeper
# the port at which the clients will connect
clientPort=2181
# disable the per-ip limit on the number of connections since this is a non-production config
maxClientCnxns=0
# Disable the adminserver by default to avoid port conflicts.
# Set the port to something non-conflicting if choosing to enable this
admin.enableServer=false
# admin.serverPort=8080
tickTime=2000
initLimit=5
syncLimit=2
server.1=10.0.2.20:2888:3888
server.2=10.0.2.18:2889:3889
server.3=10.0.2.19:2890:3890
echo "2" > /tmp/zookeeper/myid
Configuration on server 10.0.2.19
[root@localhost kafka_2.13-2.8.0]# cat /opt/kafka_2.13-2.8.0/config/zookeeper.properties
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# the directory where the snapshot is stored.
dataDir=/tmp/zookeeper
# the port at which the clients will connect
clientPort=2181
# disable the per-ip limit on the number of connections since this is a non-production config
maxClientCnxns=0
# Disable the adminserver by default to avoid port conflicts.
# Set the port to something non-conflicting if choosing to enable this
admin.enableServer=false
# admin.serverPort=8080
tickTime=2000
initLimit=5
syncLimit=2
server.1=10.0.2.20:2888:3888
server.2=10.0.2.18:2889:3889
server.3=10.0.2.19:2890:3890
echo "3" > /tmp/zookeeper/myid
Configuration on server 10.0.2.20
[root@localhost kafka_2.13-2.8.0]# cat /opt/kafka_2.13-2.8.0/config/zookeeper.properties
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# the directory where the snapshot is stored.
dataDir=/tmp/zookeeper
# the port at which the clients will connect
clientPort=2181
# disable the per-ip limit on the number of connections since this is a non-production config
maxClientCnxns=0
# Disable the adminserver by default to avoid port conflicts.
# Set the port to something non-conflicting if choosing to enable this
admin.enableServer=false
# admin.serverPort=8080
tickTime=2000
initLimit=5
syncLimit=2
server.1=10.0.2.20:2888:3888
server.2=10.0.2.18:2889:3889
server.3=10.0.2.19:2890:3890
echo "1" > /tmp/zookeeper/myid
Start the ZooKeeper cluster
Run on every server (note that the myid written above must match the server.N index assigned to that host's IP):
cd /opt/kafka_2.13-2.8.0
bin/zookeeper-server-start.sh -daemon config/zookeeper.properties
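To confirm the ensemble formed correctly, each node's role can be queried through ZooKeeper's four-letter-word interface (a quick sketch, assuming nc is installed and the srvr command is whitelisted, as it is by default in ZooKeeper 3.5+):

# Ask each node for its role; expect one "leader" and two "follower" modes
for host in 10.0.2.18 10.0.2.19 10.0.2.20; do
  echo srvr | nc $host 2181 | grep Mode
done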
Kafka setup
Configuration on server 10.0.2.18
[root@localhost kafka_2.13-2.8.0]# cat /opt/kafka_2.13-2.8.0/config/server.properties
broker.id=2
listeners=PLAINTEXT://10.0.2.18:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/tmp/kafka-logs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1

log.retention.hours=168

log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=10.0.2.18:2181,10.0.2.19:2181,10.0.2.20:2181

zookeeper.connection.timeout.ms=18000

group.initial.rebalance.delay.ms=0
delete.topic.enable=true
Configuration on server 10.0.2.19
[root@localhost kafka_2.13-2.8.0]# cat /opt/kafka_2.13-2.8.0/config/server.properties
broker.id=3
listeners=PLAINTEXT://10.0.2.19:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/tmp/kafka-logs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1

log.retention.hours=168

log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=10.0.2.18:2181,10.0.2.19:2181,10.0.2.20:2181

zookeeper.connection.timeout.ms=18000

group.initial.rebalance.delay.ms=0
delete.topic.enable=true
Configuration on server 10.0.2.20
[root@localhost kafka_2.13-2.8.0]# cat /opt/kafka_2.13-2.8.0/config/server.properties
broker.id=1
listeners=PLAINTEXT://10.0.2.20:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/tmp/kafka-logs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1

log.retention.hours=168

log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=10.0.2.18:2181,10.0.2.19:2181,10.0.2.20:2181

zookeeper.connection.timeout.ms=18000

group.initial.rebalance.delay.ms=0
delete.topic.enable=true
Start the Kafka cluster
Run on every server:
cd /opt/kafka_2.13-2.8.0
bin/kafka-server-start.sh -daemon config/server.properties
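Once the brokers are up, their registrations can be checked through the bundled ZooKeeper shell (a verification sketch; broker IDs 1, 2, and 3 should appear):

# List the broker IDs registered in ZooKeeper; expect [1, 2, 3]
bin/zookeeper-shell.sh 10.0.2.18:2181 ls /brokers/ids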
Debezium package preparation
Download the packages
The following packages are required:
debezium-connector-oracle-1.6.0-20210616.001509-60-plugin.tar.gz
instantclient-basic-linux.x64-21.1.0.0.0.zip
Download debezium-connector-oracle and the Oracle Instant Client from their respective download pages.
Extract the packages
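The extraction commands are not shown in the original; assuming the archives were downloaded to /opt, something like the following produces the directories used in the cp commands below:

cd /opt
# Unpack the Debezium Oracle plugin into /opt/debezium-connector-oracle
tar -zxvf debezium-connector-oracle-1.6.0-20210616.001509-60-plugin.tar.gz
# Unpack the Instant Client into /opt/instantclient_21_1
unzip instantclient-basic-linux.x64-21.1.0.0.0.zip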

Copy the JARs into the Kafka libs directory:
cp /opt/debezium-connector-oracle/*.jar /opt/kafka_2.13-2.8.0/libs/
cp /opt/instantclient_21_1/*.jar /opt/kafka_2.13-2.8.0/libs/
Oracle configuration
Log in to the database
Switch to the oracle user:
su - oracle
Change to the Oracle installation directory and log in to the Oracle database:
sqlplus / as sysdba
Enable archive log mode
Archive logging must be enabled while the database is mounted, so shut the database down and restart it into the mount state:
SQL> shutdown immediate
-- output:
Database closed.
Database dismounted.
ORACLE instance shut down.
SQL> startup mount
ORACLE instance started.

Total System Global Area 1603411968 bytes
Fixed Size 2213776 bytes
Variable Size 989857904 bytes
Database Buffers 603979776 bytes
Redo Buffers 7360512 bytes
Database mounted.
Enable database archiving:
SQL> alter database archivelog;
-- output:
Database altered.
Check the archiving status:
SQL> archive log list
-- output:
Database log mode              Archive Mode
Automatic archival             Enabled
Archive destination            /u01/app/oracle/archive_log
Oldest online log sequence     244
Next log sequence to archive   246
Current log sequence           246
Enable automatic archiving:
alter system archive log start;
Enable force logging:
ALTER DATABASE FORCE LOGGING;
Open the database:
SQL> alter database open;

Database altered.
Confirm the database is in archive mode:
SQL> select log_mode from v$database;

LOG_MODE
------------------------------------
ARCHIVELOG

SQL> select archiver from v$instance;

ARCHIVER
---------------------
STARTED
Enable supplemental logging
Enable minimal supplemental logging:
SQL> alter database add supplemental log data;

Database altered.
Enable supplemental logging for all columns:
SQL> alter database add supplemental log data (all) columns;

Database altered.
Confirm supplemental logging is enabled:
select SUPPLEMENTAL_LOG_DATA_MIN min,
       SUPPLEMENTAL_LOG_DATA_PK pk,
       SUPPLEMENTAL_LOG_DATA_UI ui,
       SUPPLEMENTAL_LOG_DATA_FK fk,
       SUPPLEMENTAL_LOG_DATA_ALL "all"
from v$database;

MIN      PK  UI  FK  all
-------- --- --- --- ---
YES      NO  NO  NO  YES
Create the Debezium user and grant privileges
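The user created below uses logminer_tbs as its default tablespace, so that tablespace must exist first. A minimal sketch, assuming a non-CDB database; the datafile path is illustrative and must match your environment:

sqlplus / as sysdba <<'EOF'
-- Example datafile location; adjust to your storage layout
CREATE TABLESPACE logminer_tbs DATAFILE '/u01/app/oracle/oradata/ORCL/logminer_tbs.dbf'
  SIZE 25M REUSE AUTOEXTEND ON MAXSIZE UNLIMITED;
EOF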
CREATE USER c IDENTIFIED BY dbz DEFAULT TABLESPACE logminer_tbs QUOTA UNLIMITED ON logminer_tbs;
GRANT CREATE SESSION TO c;
GRANT SET CONTAINER TO c;
GRANT SELECT ON V_$DATABASE to c;
GRANT FLASHBACK ANY TABLE TO c;
GRANT SELECT ANY TABLE TO c;
GRANT SELECT_CATALOG_ROLE TO c;
GRANT EXECUTE_CATALOG_ROLE TO c;
GRANT SELECT ANY TRANSACTION TO c;
GRANT LOGMINING TO c;

GRANT CREATE TABLE TO c;
GRANT LOCK ANY TABLE TO c;
GRANT ALTER ANY TABLE TO c;
GRANT CREATE SEQUENCE TO c;

GRANT EXECUTE ON DBMS_LOGMNR TO c;
GRANT EXECUTE ON DBMS_LOGMNR_D TO c;

GRANT SELECT ON V_$LOG TO c;
GRANT SELECT ON V_$LOG_HISTORY TO c;
GRANT SELECT ON V_$LOGMNR_LOGS TO c;
GRANT SELECT ON V_$LOGMNR_CONTENTS TO c;
GRANT SELECT ON V_$LOGMNR_PARAMETERS TO c;
GRANT SELECT ON V_$LOGFILE TO c;
GRANT SELECT ON V_$ARCHIVED_LOG TO c;
GRANT SELECT ON V_$ARCHIVE_DEST_STATUS TO c;
Kafka Connect setup
Note: Kafka Connect is deployed in distributed mode.
cd /opt/kafka_2.13-2.8.0
Configuration on server 10.0.2.18
cat config/connect-distributed.properties
bootstrap.servers=10.0.2.18:9092,10.0.2.19:9092,10.0.2.20:9092
group.id=connect-cluster
#group.id=1
key.converter=org.apache.kafka.connect.json.JsonConverter
value.converter=org.apache.kafka.connect.json.JsonConverter
key.converter.schemas.enable=false
value.converter.schemas.enable=false

internal.key.converter=org.apache.kafka.connect.json.JsonConverter
internal.value.converter=org.apache.kafka.connect.json.JsonConverter
internal.key.converter.schemas.enable=false
internal.value.converter.schemas.enable=false

offset.storage.topic=connect-offsets
offset.storage.replication.factor=3
offset.storage.partitions=3

config.storage.topic=connect-configs
config.storage.replication.factor=3

status.storage.topic=connect-status
status.storage.replication.factor=3

offset.flush.interval.ms=10000
rest.advertised.host.name=10.0.2.18
#rest.advertised.port=8083

offset.storage.file.filename=/tmp/connect.offsets
plugin.path=/opt/debezium-connector-oracle/
Configuration on server 10.0.2.19
cat config/connect-distributed.properties
bootstrap.servers=10.0.2.18:9092,10.0.2.19:9092,10.0.2.20:9092
group.id=connect-cluster
#group.id=1
key.converter=org.apache.kafka.connect.json.JsonConverter
value.converter=org.apache.kafka.connect.json.JsonConverter
key.converter.schemas.enable=false
value.converter.schemas.enable=false

internal.key.converter=org.apache.kafka.connect.json.JsonConverter
internal.value.converter=org.apache.kafka.connect.json.JsonConverter
internal.key.converter.schemas.enable=false
internal.value.converter.schemas.enable=false

offset.storage.topic=connect-offsets
offset.storage.replication.factor=3
offset.storage.partitions=3

config.storage.topic=connect-configs
config.storage.replication.factor=3

status.storage.topic=connect-status
status.storage.replication.factor=3

offset.flush.interval.ms=10000
rest.advertised.host.name=10.0.2.19
#rest.advertised.port=8083

offset.storage.file.filename=/tmp/connect.offsets
plugin.path=/opt/debezium-connector-oracle/
Configuration on server 10.0.2.20
cat config/connect-distributed.properties
bootstrap.servers=10.0.2.18:9092,10.0.2.19:9092,10.0.2.20:9092
group.id=connect-cluster
#group.id=1
key.converter=org.apache.kafka.connect.json.JsonConverter
value.converter=org.apache.kafka.connect.json.JsonConverter
key.converter.schemas.enable=false
value.converter.schemas.enable=false

internal.key.converter=org.apache.kafka.connect.json.JsonConverter
internal.value.converter=org.apache.kafka.connect.json.JsonConverter
internal.key.converter.schemas.enable=false
internal.value.converter.schemas.enable=false

offset.storage.topic=connect-offsets
offset.storage.replication.factor=3
offset.storage.partitions=3

config.storage.topic=connect-configs
config.storage.replication.factor=3

status.storage.topic=connect-status
status.storage.replication.factor=3

offset.flush.interval.ms=10000
rest.advertised.host.name=10.0.2.20
#rest.advertised.port=8083

offset.storage.file.filename=/tmp/connect.offsets
plugin.path=/opt/debezium-connector-oracle/
Create the topics required at startup
bin/kafka-topics.sh --create --zookeeper 10.0.2.18:2181 --topic connect-configs --replication-factor 3 --partitions 1 --config cleanup.policy=compact
bin/kafka-topics.sh --create --zookeeper 10.0.2.19:2181 --topic connect-offsets --replication-factor 3 --partitions 50 --config cleanup.policy=compact
bin/kafka-topics.sh --create --zookeeper localhost:2181 --topic connect-status --replication-factor 3 --partitions 10 --config cleanup.policy=compact
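To verify the three topics were created with the expected settings (a check not shown in the original):

# List the Connect internal topics; use --describe for partition details
bin/kafka-topics.sh --list --zookeeper 10.0.2.18:2181 | grep connect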
Start Kafka Connect
Run on every server:
cd /opt/kafka_2.13-2.8.0
bin/connect-distributed.sh config/connect-distributed.properties
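Each worker exposes a REST API on port 8083; a quick liveness check (assuming jq is installed):

# Returns the Connect version and the Kafka cluster ID if the worker is up
curl -s localhost:8083/ | jq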
Create the connector
curl -X POST -H "Content-Type: application/json" localhost:8083/connectors -d '{
  "name": "inventory-connector",
  "config": {
    "connector.class" : "io.debezium.connector.oracle.OracleConnector",
    "tasks.max" : "1",
    "database.server.name" : "server1",
    "database.hostname" : "10.0.2.15",
    "database.port" : "1521",
    "database.user" : "c",
    "database.password" : "dbz",
    "database.dbname" : "ORCL",
    "database.history.kafka.bootstrap.servers" : "10.0.2.20:9092,10.0.2.18:9092,10.0.2.19:9092",
    "database.history.kafka.topic": "schema-changes.inventory"
  }
}'
List the connectors:
[root@localhost kafka_2.13-2.8.0]# curl -s localhost:8083/connectors|jq
[
  "inventory-connector"
]
View the connector's details:
[root@localhost kafka_2.13-2.8.0]# curl -s localhost:8083/connectors/inventory-connector|jq
{
  "name": "inventory-connector",
  "config": {
    "connector.class": "io.debezium.connector.oracle.OracleConnector",
    "database.user": "c",
    "database.dbname": "ORCL",
    "tasks.max": "1",
    "database.hostname": "10.0.2.15",
    "database.password": "dbz",
    "database.history.kafka.bootstrap.servers": "10.0.2.20:9092,10.0.2.18:9092,10.0.2.19:9092",
    "database.history.kafka.topic": "schema-changes.inventory",
    "name": "inventory-connector",
    "database.server.name": "server1",
    "database.port": "1521"
  },
  "tasks": [
    {
      "connector": "inventory-connector",
      "task": 0
    }
  ],
  "type": "source"
}
Check the connector's status:
[root@localhost kafka_2.13-2.8.0]# curl -s localhost:8083/connectors/inventory-connector/status|jq
{
  "name": "inventory-connector",
  "connector": {
    "state": "RUNNING",
    "worker_id": "127.0.0.1:8083"
  },
  "tasks": [
    {
      "id": 0,
      "state": "RUNNING",
      "worker_id": "127.0.0.1:8083"
    }
  ],
  "type": "source"
}
Verify that the topics were created
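The topics created by the connector can be listed from the command line (a verification sketch; server1.TEST.STUDENT and schema-changes.inventory should appear):

bin/kafka-topics.sh --list --zookeeper 10.0.2.18:2181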

Test data synchronization
Check the data in the Oracle table:
SQL> conn test/test;
Connected.
SQL> select * from student;
no rows selected
Check the data in the corresponding Kafka topic:
bin/kafka-console-consumer.sh --bootstrap-server 10.0.2.20:9092 --topic server1.TEST.STUDENT --from-beginning
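Because schemas.enable is false in the Connect config, each record is the bare Debezium change-event envelope, so the operation type and the new row state can be extracted directly (a convenience sketch, not part of the original):

# Show only the operation ("c", "u", "d", or "r") and the row state after the change
bin/kafka-console-consumer.sh --bootstrap-server 10.0.2.20:9092 \
  --topic server1.TEST.STUDENT --from-beginning | jq '{op, after}'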

Test whether out-of-order inserts are synchronized
Insert data into the Oracle table:
SQL> insert into student(sno,sname,ssex,sbirthday,sclass) values(108,'曾华','男',to_date('1977-09-01','yyyy-mm-dd'),95033);

1 row created.

SQL> commit;

Commit complete.
SQL> insert into student(sno,sname,ssex,sbirthday,sclass) values(105,'匡明','男',to_date('1975-10-02','yyyy-mm-dd'),95031);

1 row created.

SQL> commit;

Commit complete.

SQL> insert into student(sno,sname,ssex,sbirthday,sclass) values(107,'王丽','女',to_date('1976-01-23','yyyy-mm-dd'),95033);

1 row created.

SQL> commit;

Commit complete.

SQL> insert into student(sno,sname,ssex,sbirthday,sclass) values(109,'王芳','女',to_date('1975-02-10','yyyy-mm-dd'),95031);

1 row created.

SQL> commit;

Commit complete.

SQL> select * from student;

       SNO SNAME                       SSEX      SBIRTHDAY       SCLASS
---------- --------------------------- --------- --------------- ---------------
       108 曾华                        男        01-SEP-77       95033
       105 匡明                        男        02-OCT-75       95031
       107 王丽                        女        23-JAN-76       95033
       109 王芳                        女        10-FEB-75       95031

Test whether UPDATE changes are synchronized
SQL> UPDATE student SET SNAME='UPDATE' WHERE SNO='108';

1 row updated.

SQL> commit;

Commit complete.

SQL> select * from student;

       SNO SNAME                       SSEX      SBIRTHDAY       SCLASS
---------- --------------------------- --------- --------------- ---------------
       108 UPDATE                      男        01-SEP-77       95033
       105 匡明                        男        02-OCT-75       95031
       107 王丽                        女        23-JAN-76       95033
       109 王芳                        女        10-FEB-75       95031

Test whether DELETE changes are synchronized
SQL> DELETE FROM student WHERE SNO='105';

1 row deleted.

SQL> commit;

Commit complete.

SQL> select * from student;

       SNO SNAME                       SSEX      SBIRTHDAY       SCLASS
---------- --------------------------- --------- --------------- ---------------
       108 UPDATE                      男        01-SEP-77       95033
       107 王丽                        女        23-JAN-76       95033
       109 王芳                        女        10-FEB-75       95031
Test whether ALTER (adding a column) is synchronized
SQL> ALTER TABLE student ADD (age integer default 22 not null);

Table altered.

SQL> commit;

Commit complete.
SQL> select * from student;

       SNO SNAME                       SSEX      SBIRTHDAY       SCLASS
---------- --------------------------- --------- --------------- ---------------
       AGE
----------
       108 UPDATE                      男        01-SEP-77       95033
        22

       107 王丽                        女        23-JAN-76       95033
        22

       109 王芳                        女        10-FEB-75       95031
        22
Kafka Connect error handling
Connector error

Solution
1. Following the hint in the error message, enable all-column supplemental logging for the offending table:
SQL> ALTER TABLE TEST_OGG.TEST_OGG ADD SUPPLEMENTAL LOG DATA (ALL) COLUMNS;

Table altered.
2. Alternatively, enable all-column supplemental logging database-wide:
SQL> alter database add supplemental log data (all) columns;

Database altered.
select SUPPLEMENTAL_LOG_DATA_MIN min,
       SUPPLEMENTAL_LOG_DATA_PK pk,
       SUPPLEMENTAL_LOG_DATA_UI ui,
       SUPPLEMENTAL_LOG_DATA_FK fk,
       SUPPLEMENTAL_LOG_DATA_ALL "all"
from v$database;

MIN      PK  UI  FK  all
-------- --- --- --- ---
YES      NO  NO  NO  YES
Plugin-loading error

Solution
Copy the JARs under debezium-connector-oracle into Kafka's libs directory:
cp /opt/debezium-connector-oracle/* /opt/kafka_2.13-2.8.0/libs/
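After restarting the workers, confirm the plugin is now discoverable through the REST API (assuming jq is installed); io.debezium.connector.oracle.OracleConnector should appear in the list:

curl -s localhost:8083/connector-plugins | jq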
