Support
Quality
Security
License
Reuse
kandi has reviewed jedis and identified the following as its top functions. This is intended to give you an instant insight into the functionality jedis implements, and to help you decide if it suits your requirements.
Redis Java client designed for performance and ease of use.
Getting started
<dependency>
<groupId>redis.clients</groupId>
<artifactId>jedis</artifactId>
<version>4.1.1</version>
</dependency>
Easier way of using connection pool
JedisPooled jedis = new JedisPooled("localhost", 6379);
Connecting to a Redis cluster
Set<HostAndPort> jedisClusterNodes = new HashSet<HostAndPort>();
jedisClusterNodes.add(new HostAndPort("127.0.0.1", 7379));
jedisClusterNodes.add(new HostAndPort("127.0.0.1", 7380));
JedisCluster jedis = new JedisCluster(jedisClusterNodes);
java.lang.VerifyError: Operand stack overflow for google-ads API and SBT
assemblyMergeStrategy in assembly := {
case x if x.contains("io.netty.versions.properties") => MergeStrategy.discard
case x =>
val oldStrategy = (assemblyMergeStrategy in assembly).value
oldStrategy(x)
}
lettuce can't connect to docker redis
services:
redis-0:
image: bitnami/redis-cluster
container_name: redis-0
restart: always
networks:
redis-net:
ipv4_address: 172.22.0.100
environment:
- 'REDIS_PORT_NUMBER=7000'
- 'ALLOW_EMPTY_PASSWORD=yes'
- 'REDIS_NODES=redis-0 redis-1 redis-2 redis-3 redis-4 redis-5'
- 'REDIS_CLUSTER_ANNOUNCE_PORT=7000'
- 'REDIS_CLUSTER_ANNOUNCE_IP=192.168.0.13' # local IP
- 'REDIS_CLUSTER_BUS_ANNOUNCE_PORT=17000'
- 'REDIS_CLUSTER_DYNAMIC_IPS=no'
ports:
- "7000:7000"
- "17000:17000"
redis-1:
image: bitnami/redis-cluster
container_name: redis-1
restart: always
networks:
redis-net:
ipv4_address: 172.22.0.101
environment:
- 'REDIS_PORT_NUMBER=7001'
- 'ALLOW_EMPTY_PASSWORD=yes'
- 'REDIS_NODES=redis-0 redis-1 redis-2 redis-3 redis-4 redis-5'
- 'REDIS_CLUSTER_ANNOUNCE_PORT=7001'
- 'REDIS_CLUSTER_ANNOUNCE_IP=192.168.0.13' # local IP
- 'REDIS_CLUSTER_BUS_ANNOUNCE_PORT=17001'
- 'REDIS_CLUSTER_DYNAMIC_IPS=no'
ports:
- "7001:7001"
- "17001:17001"
redis-2:
image: bitnami/redis-cluster
container_name: redis-2
restart: always
networks:
redis-net:
ipv4_address: 172.22.0.102
environment:
- 'REDIS_PORT_NUMBER=7002'
- 'ALLOW_EMPTY_PASSWORD=yes'
- 'REDIS_NODES=redis-0 redis-1 redis-2 redis-3 redis-4 redis-5'
- 'REDIS_CLUSTER_ANNOUNCE_PORT=7002'
- 'REDIS_CLUSTER_ANNOUNCE_IP=192.168.0.13' # local IP
- 'REDIS_CLUSTER_BUS_ANNOUNCE_PORT=17002'
- 'REDIS_CLUSTER_DYNAMIC_IPS=no'
ports:
- "7002:7002"
- "17002:17002"
# ... other nodes (redis-3 through redis-5 follow the same pattern)
redis-cluster-init:
image: redis:6.2
container_name: redis-cluster-init
restart: 'no'
networks:
redis-net:
ipv4_address: 172.22.0.106
depends_on:
- redis-0
- redis-1
- redis-2
- redis-3
- redis-4
- redis-5
entrypoint: []
command:
- /bin/bash
- -c
- redis-cli --cluster create 172.22.0.100:7000 172.22.0.101:7001 172.22.0.102:7002 172.22.0.103:7003 172.22.0.104:7004 172.22.0.105:7005 --cluster-replicas 1 --cluster-yes
redis_commander:
image: rediscommander/redis-commander:latest
container_name: redis_web
environment:
REDIS_HOSTS: "local:redis-0:7000,local:redis-1:7001,local:redis-2:7002,local:redis-3:7003,local:redis-4:7004,local:redis-5:7005"
ports:
- "5000:8081"
depends_on:
- redis-1
- redis-2
- redis-3
- redis-4
- redis-5
- redis-0
- redis-cluster-init
networks:
redis-net:
ipv4_address: 172.22.0.107
networks:
redis-net:
name: redis-net
driver: bridge
ipam:
config:
- subnet: 172.22.0.0/16
RedisURI redis0 = RedisURI.create("127.0.0.1",7000);
RedisURI redis1 = RedisURI.create("127.0.0.1",7001);
RedisURI redis2 = RedisURI.create("127.0.0.1",7002);
RedisClusterClient clusterClient = RedisClusterClient.create(Arrays.asList(redis1,redis2,redis0));
StatefulRedisClusterConnection<String, String> connection = clusterClient.connect();
RedisAdvancedClusterCommands<String, String> syncCommands = connection.sync();
-----------------------
services:
redis-0:
image: bitnami/redis-cluster
container_name: redis-0
restart: always
networks:
redis-net:
ipv4_address: 172.22.0.100
environment:
- 'REDIS_PORT_NUMBER=7000'
- 'ALLOW_EMPTY_PASSWORD=yes'
- 'REDIS_NODES=redis-0 redis-1 redis-2 redis-3 redis-4 redis-5'
- 'REDIS_CLUSTER_ANNOUNCE_PORT=7000'
- 'REDIS_CLUSTER_ANNOUNCE_IP=192.168.0.13' # local IP
- 'REDIS_CLUSTER_BUS_ANNOUNCE_PORT=17000'
- 'REDIS_CLUSTER_DYNAMIC_IPS=no'
ports:
- "7000:7000"
- "17000:17000"
redis-1:
image: bitnami/redis-cluster
container_name: redis-1
restart: always
networks:
redis-net:
ipv4_address: 172.22.0.101
environment:
- 'REDIS_PORT_NUMBER=7001'
- 'ALLOW_EMPTY_PASSWORD=yes'
- 'REDIS_NODES=redis-0 redis-1 redis-2 redis-3 redis-4 redis-5'
- 'REDIS_CLUSTER_ANNOUNCE_PORT=7001'
- 'REDIS_CLUSTER_ANNOUNCE_IP=192.168.0.13' # local IP
- 'REDIS_CLUSTER_BUS_ANNOUNCE_PORT=17001'
- 'REDIS_CLUSTER_DYNAMIC_IPS=no'
ports:
- "7001:7001"
- "17001:17001"
redis-2:
image: bitnami/redis-cluster
container_name: redis-2
restart: always
networks:
redis-net:
ipv4_address: 172.22.0.102
environment:
- 'REDIS_PORT_NUMBER=7002'
- 'ALLOW_EMPTY_PASSWORD=yes'
- 'REDIS_NODES=redis-0 redis-1 redis-2 redis-3 redis-4 redis-5'
- 'REDIS_CLUSTER_ANNOUNCE_PORT=7002'
- 'REDIS_CLUSTER_ANNOUNCE_IP=192.168.0.13' # local IP
- 'REDIS_CLUSTER_BUS_ANNOUNCE_PORT=17002'
- 'REDIS_CLUSTER_DYNAMIC_IPS=no'
ports:
- "7002:7002"
- "17002:17002"
# ... other nodes (redis-3 through redis-5 follow the same pattern)
redis-cluster-init:
image: redis:6.2
container_name: redis-cluster-init
restart: 'no'
networks:
redis-net:
ipv4_address: 172.22.0.106
depends_on:
- redis-0
- redis-1
- redis-2
- redis-3
- redis-4
- redis-5
entrypoint: []
command:
- /bin/bash
- -c
- redis-cli --cluster create 172.22.0.100:7000 172.22.0.101:7001 172.22.0.102:7002 172.22.0.103:7003 172.22.0.104:7004 172.22.0.105:7005 --cluster-replicas 1 --cluster-yes
redis_commander:
image: rediscommander/redis-commander:latest
container_name: redis_web
environment:
REDIS_HOSTS: "local:redis-0:7000,local:redis-1:7001,local:redis-2:7002,local:redis-3:7003,local:redis-4:7004,local:redis-5:7005"
ports:
- "5000:8081"
depends_on:
- redis-1
- redis-2
- redis-3
- redis-4
- redis-5
- redis-0
- redis-cluster-init
networks:
redis-net:
ipv4_address: 172.22.0.107
networks:
redis-net:
name: redis-net
driver: bridge
ipam:
config:
- subnet: 172.22.0.0/16
RedisURI redis0 = RedisURI.create("127.0.0.1",7000);
RedisURI redis1 = RedisURI.create("127.0.0.1",7001);
RedisURI redis2 = RedisURI.create("127.0.0.1",7002);
RedisClusterClient clusterClient = RedisClusterClient.create(Arrays.asList(redis1,redis2,redis0));
StatefulRedisClusterConnection<String, String> connection = clusterClient.connect();
RedisAdvancedClusterCommands<String, String> syncCommands = connection.sync();
How do I connect to an AWS ElastiCache for Redis Cluster using Jedis?
Set<HostAndPort> jedisClusterNodes = new HashSet<HostAndPort>();
jedisClusterNodes.add(new HostAndPort(CONFIGURATION_ENDPOINT, 6379));
try (JedisCluster jedisCluster = new JedisCluster(jedisClusterNodes)) {
// ...
}
Efficient way to get all the key value pair from redis cache using Jedis
public List<String> get(final String[] keys) {
try (Jedis jedis = jedisPool.getResource()) {
List<String> values = jedis.mget(keys);
return values;
} catch (Exception ex) {
log.error("Exception caught in mget", ex);
}
return null;
}
Why maven sometimes can't resolve dependencies?
<!-- https://mvnrepository.com/artifact/com.amazonaws/aws-java-sdk-bom -->
<dependency>
<groupId>com.amazonaws</groupId>
<artifactId>aws-java-sdk-bom</artifactId>
<version>1.12.138</version>
<type>pom</type>
<scope>import</scope>
</dependency>
Failed to load ApplicationContext exception
Error creating bean with name 'dataSource'
Failed to determine a suitable driver class
-----------------------
Error creating bean with name 'dataSource'
Failed to determine a suitable driver class
Jedis cannot find master using sentinel when sentinel required password
JedisSentinelPool(String masterName, Set<String> sentinels, String password, String sentinelPassword)
Got NPE after integrating play framework with play-redis
play.modules.enabled += play.api.cache.redis.RedisCacheModule
# provide additional configuration in the custom module
play.modules.enabled += services.CustomCacheModule
play.cache.redis {
# do not bind default unqualified APIs
bind-default: false
# name of the instance in simple configuration,
# i.e., not located under `instances` key
# but directly under 'play.cache.redis'
default-cache: "redis"
source = custom
host = 127.0.0.1
# redis server: port
port = 6380
# redis server: database number (optional)
database = 0
# authentication password (optional)
password = "#########"
refresh-minute = 10
}
class CustomCacheModule extends AbstractModule {
override def configure(): Unit = {
// NamedCacheImpl's input used to be "play"
bind(classOf[RedisInstance]).annotatedWith(new NamedCacheImpl("redis")).to(classOf[CustomRedisInstance])
()
}
}
-----------------------
play.modules.enabled += play.api.cache.redis.RedisCacheModule
# provide additional configuration in the custom module
play.modules.enabled += services.CustomCacheModule
play.cache.redis {
# do not bind default unqualified APIs
bind-default: false
# name of the instance in simple configuration,
# i.e., not located under `instances` key
# but directly under 'play.cache.redis'
default-cache: "redis"
source = custom
host = 127.0.0.1
# redis server: port
port = 6380
# redis server: database number (optional)
database = 0
# authentication password (optional)
password = "#########"
refresh-minute = 10
}
class CustomCacheModule extends AbstractModule {
override def configure(): Unit = {
// NamedCacheImpl's input used to be "play"
bind(classOf[RedisInstance]).annotatedWith(new NamedCacheImpl("redis")).to(classOf[CustomRedisInstance])
()
}
}
Delete multiple Redis stream id with Jedis
String key;
StreamEntryID id1, id2, ..., idN;
...
jedis.xdel(key, id1, id2, ..., idN);
String key;
StreamEntryID[] ids;
...
jedis.xdel(key, ids);
-----------------------
String key;
StreamEntryID id1, id2, ..., idN;
...
jedis.xdel(key, id1, id2, ..., idN);
String key;
StreamEntryID[] ids;
...
jedis.xdel(key, ids);
Getting A component required a bean named 'redisTemplate' that could not be found. for multi different RedisTemplates configuration
@Configuration
@PropertySource("classpath:redis.properties")
@Slf4j
public class RedisConfiguration {
private static final String REDIS_PROPERTIES = "redis.properties";
private final Properties redisProperties = readConfigurationFile(REDIS_PROPERTIES);
@Value("${redis.host}")
private String host;
@Value("${redis.port}")
private int port;
@Value("${redis.password}")
private String password;
@Value("${redis.timeout}")
private String timeout;
@Bean(name = "jedisConnectionFactory")
JedisConnectionFactory jedisConnectionFactory() {
return new JedisConnectionFactory();
}
@Bean(name = "redisTemplate")
public RedisTemplate<String, Object> redisTemplate(
@Qualifier(value = "jedisConnectionFactory") RedisConnectionFactory redisConnectionFactory) {
RedisTemplate<String, Object> template = new RedisTemplate<>();
template.setConnectionFactory(redisConnectionFactory);
template.setKeySerializer(stringRedisSerializer());
template.setValueSerializer(stringRedisSerializer());
return template;
}
@Bean(name = "redisUserConnectionFactory")
public JedisConnectionFactory redisUserConnectionFactory() {
RedisStandaloneConfiguration redisConfiguration = new RedisStandaloneConfiguration();
String userDb = getProperty(redisProperties, RedisDb.USER_DB);
setRedisProperties(redisConfiguration, userDb);
JedisClientConfiguration jedisClientConfiguration =
JedisClientConfiguration.builder()
.connectTimeout(Duration.ofMillis(Long.parseLong(timeout)))
.build();
logRedisConnectionDetails(redisConfiguration);
return new JedisConnectionFactory(redisConfiguration, jedisClientConfiguration);
}
@Bean(name = "userRedisTemplate")
public RedisTemplate<String, Object> userRedisTemplate(
@Qualifier(value = "redisUserConnectionFactory")
RedisConnectionFactory redisConnectionFactory) {
RedisTemplate<String, Object> template = new RedisTemplate<>();
template.setConnectionFactory(redisConnectionFactory);
template.setKeySerializer(stringRedisSerializer());
template.setValueSerializer(stringRedisSerializer());
return template;
}
@Bean(name = "redisRegistrationTokenConnectionFactory")
public JedisConnectionFactory redisRegistrationTokenConnectionFactory() {
RedisStandaloneConfiguration redisConfiguration = new RedisStandaloneConfiguration();
String registrationTokenDb = getProperty(redisProperties, RedisDb.REGISTRATION_TOKEN_DB);
setRedisProperties(redisConfiguration, registrationTokenDb);
JedisClientConfiguration jedisClientConfiguration =
JedisClientConfiguration.builder()
.connectTimeout(Duration.ofMillis(Long.parseLong(timeout)))
.build();
logRedisConnectionDetails(redisConfiguration);
return new JedisConnectionFactory(redisConfiguration, jedisClientConfiguration);
}
@Bean(name = "registrationTokenRedisTemplate")
public RedisTemplate<String, Object> registrationTokenRedisTemplate(
@Qualifier(value = "redisRegistrationTokenConnectionFactory")
RedisConnectionFactory redisConnectionFactory) {
RedisTemplate<String, Object> template = new RedisTemplate<>();
template.setConnectionFactory(redisConnectionFactory);
template.setKeySerializer(stringRedisSerializer());
template.setValueSerializer(stringRedisSerializer());
return template;
}
@Bean(name = "stringRedisSerializer")
public StringRedisSerializer stringRedisSerializer() {
return new StringRedisSerializer();
}
private void setRedisProperties(RedisStandaloneConfiguration redisConfiguration, String redisDb) {
redisConfiguration.setHostName(host);
redisConfiguration.setPort(port);
redisConfiguration.setDatabase(Integer.parseInt(redisDb));
redisConfiguration.setPassword(RedisPassword.of(password));
}
private void logRedisConnectionDetails(RedisStandaloneConfiguration redisConfiguration) {
log.info(
"Connected to Redis host: {}, port: {}, database: {}",
redisConfiguration.getHostName(),
redisConfiguration.getPort(),
redisConfiguration.getDatabase());
}
}
QUESTION
java.lang.VerifyError: Operand stack overflow for google-ads API and SBT
Asked 2022-Mar-03 at 07:10

I am trying to migrate from the Google AdWords API to the google-ads-v10 API in Spark 3.1.1 on EMR. I am facing some dependency issues due to conflicts with existing jars. Initially, we faced a dependency issue related to the Protobuf jar:
Exception in thread "grpc-default-executor-0" java.lang.IllegalAccessError: tried to access field com.google.protobuf.AbstractMessage.memoizedSize from class com.google.ads.googleads.v10.services.SearchGoogleAdsRequest
at com.google.ads.googleads.v10.services.SearchGoogleAdsRequest.getSerializedSize(SearchGoogleAdsRequest.java:394)
at io.grpc.protobuf.lite.ProtoInputStream.available(ProtoInputStream.java:108)
In order to resolve this, tried to shade the Protobuf jar and have a uber-jar instead. After the shading, running my project locally in IntelliJ works fine, But when trying to run an executable jar I created I get the following error:
Exception in thread "main" io.grpc.ManagedChannelProvider$ProviderNotFoundException: No functional channel service provider found. Try adding a dependency on the grpc-okhttp, grpc-netty, or grpc-netty-shaded artifact
I tried adding all those libraries via `spark.jars.packages` (the `--packages` option), but it didn't help.
java.lang.VerifyError: Operand stack overflow
Exception Details:
Location:
io/grpc/internal/TransportTracer.getStats()Lio/grpc/InternalChannelz$TransportStats; ...
...
...
at io.grpc.netty.shaded.io.grpc.netty.NettyChannelBuilder.<init>(NettyChannelBuilder.java:96)
at io.grpc.netty.shaded.io.grpc.netty.NettyChannelBuilder.forTarget(NettyChannelBuilder.java:169)
at io.grpc.netty.shaded.io.grpc.netty.NettyChannelBuilder.forAddress(NettyChannelBuilder.java:152)
at io.grpc.netty.shaded.io.grpc.netty.NettyChannelProvider.builderForAddress(NettyChannelProvider.java:38)
at io.grpc.netty.shaded.io.grpc.netty.NettyChannelProvider.builderForAddress(NettyChannelProvider.java:24)
at io.grpc.ManagedChannelBuilder.forAddress(ManagedChannelBuilder.java:39)
at com.google.api.gax.grpc.InstantiatingGrpcChannelProvider.createSingleChannel(InstantiatingGrpcChannelProvider.java:348)
Has anyone ever encountered such an issue?
Build.sbt
lazy val dependencies = new {
val sparkRedshift = "io.github.spark-redshift-community" %% "spark-redshift" % "5.0.3" % "provided" excludeAll (ExclusionRule(organization = "com.amazonaws"))
val jsonSimple = "com.googlecode.json-simple" % "json-simple" % "1.1" % "provided"
val googleAdsLib = "com.google.api-ads" % "google-ads" % "17.0.1"
val jedis = "redis.clients" % "jedis" % "3.0.1" % "provided"
val sparkAvro = "org.apache.spark" %% "spark-avro" % sparkVersion % "provided"
val queryBuilder = "com.itfsw" % "QueryBuilder" % "1.0.4" % "provided" excludeAll (ExclusionRule(organization = "com.fasterxml.jackson.core"))
val protobufForGoogleAds = "com.google.protobuf" % "protobuf-java" % "3.18.1"
val guavaForGoogleAds = "com.google.guava" % "guava" % "31.1-jre"
}
libraryDependencies ++= Seq(
dependencies.sparkRedshift, dependencies.jsonSimple, dependencies.googleAdsLib,dependencies.guavaForGoogleAds,dependencies.protobufForGoogleAds
,dependencies.jedis, dependencies.sparkAvro,
dependencies.queryBuilder
)
dependencyOverrides ++= Set(
dependencies.guavaForGoogleAds
)
assemblyShadeRules in assembly := Seq(
ShadeRule.rename("com.google.protobuf.**" -> "repackaged.protobuf.@1").inAll
)
assemblyMergeStrategy in assembly := {
case PathList("META-INF", xs@_*) => MergeStrategy.discard
case PathList("module-info.class", xs@_*) => MergeStrategy.discard
case x => MergeStrategy.first
}
ANSWER
Answered 2022-Mar-02 at 18:58

I had a similar issue, and I changed the assembly merge strategy to this:
assemblyMergeStrategy in assembly := {
case x if x.contains("io.netty.versions.properties") => MergeStrategy.discard
case x =>
val oldStrategy = (assemblyMergeStrategy in assembly).value
oldStrategy(x)
}
Community Discussions, Code Snippets contain sources that include Stack Exchange Network
No vulnerabilities reported
Save this library and start creating your kit
Explore Related Topics
Save this library and start creating your kit